hip_filename (string, 5-84 chars) | hip_content (string, 79-9.69M chars) | cuda_filename (string, 4-83 chars) | cuda_content (string, 19-9.69M chars)
---|---|---|---|
7c66109d94daa6222733c5eaad4d4f74b1c86581.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output, const T *table, const IdT *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
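// Thread mapping (as laid out below): threadIdx.x strides across the embedding width D,
// while the (blockIdx.x, threadIdx.y) pair forms a grid-stride loop over the K lookup ids
// with stride blockDim.y * gridDim.x, so one row of threads copies one table row at a time.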
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += blockDim.x) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += blockDim.y * gridDim.x;
}
}
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table, const T *output, const IdT *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
const T *out = output + idy * D;
T *tab = table + id * D;
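// Different ids may index the same table row, so the per-row gradient
// scatter-add below has to be performed with atomics.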
#ifdef PADDLE_WITH_CUDA
paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
for (int i = idx; i < D; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
#endif
idy += blockDim.y * gridDim.x;
}
}
template <typename T>
struct LookupTableV2CUDAFunctor {
LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
const framework::Tensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto *table_t = context_.Input<framework::Tensor>("W");
auto *output_t = context_.Output<framework::Tensor>("Out");
int64_t padding_idx = context_.Attr<int64_t>("padding_idx");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t_->numel();
const int gridx = 2 * context_.cuda_device_context().GetSMCount();
dim3 threads(256, 4);
dim3 grids(gridx, 1);
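// Launch configuration: 256 threads along the embedding dimension, 4 id-rows per block,
// and twice as many blocks as SMs so the grid-stride loop keeps every SM occupied.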
const auto *table = table_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
auto *output = output_t->template mutable_data<T>(context_.GetPlace());
auto stream = context_.cuda_device_context().stream();
if (padding_idx == -1) {
hipLaunchKernelGGL(( LookupTableV2<T, IdT, false>), dim3(grids), dim3(threads), 0, stream,
output, table, ids, N, K, D, padding_idx);
} else {
hipLaunchKernelGGL(( LookupTableV2<T, IdT, true>), dim3(grids), dim3(threads), 0, stream,
output, table, ids, N, K, D, padding_idx);
}
}
private:
const framework::ExecutionContext &context_;
const framework::Tensor *ids_t_;
};
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<framework::Tensor>("Ids");
LookupTableV2CUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids, const int64_t K,
OutT *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = static_cast<OutT>(in_ids[i]);
}
}
template <typename T>
struct LookupTableV2GradCUDAFunctor {
LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
const framework::Tensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto &dev_ctx =
context_.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context_.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *table = context_.Input<framework::Tensor>("W");
auto *d_output =
context_.Input<framework::Tensor>(framework::GradVarName("Out"));
auto *d_table =
context_.Output<phi::SelectedRows>(framework::GradVarName("W"));
const auto *ids_data = ids_t_->template data<IdT>();
int64_t ids_num = ids_t_->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = context_.GetPlace();
paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows);
if (!std::is_same<IdT, int64_t>::value) {
hipLaunchKernelGGL(( InputTypeConvert), dim3(grids), dim3(threads), 0, stream,
ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
} else {
memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(gpu_place),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
}
mixv_new_rows.CopyToCPU();
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->template mutable_data<T>(gpu_place);
auto *d_table_data = d_table_value->template data<T>();
auto *d_output_data = d_output->template data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto d_output_t =
context_.Input<framework::Tensor>(framework::GradVarName("Out"));
auto d_table_t =
context_.Output<framework::Tensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t_->numel();
const T *d_output = d_output_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif
const int gridx = 2 * dev_ctx.GetSMCount();
dim3 threads(128, 8);
dim3 grids(gridx, 1);
hipLaunchKernelGGL(( LookupTableV2Grad<T, IdT>), dim3(grids), dim3(threads), 0, dev_ctx.stream(),
d_table, d_output, ids, N, K, D);
}
}
private:
const framework::ExecutionContext &context_;
const framework::Tensor *ids_t_;
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<framework::Tensor>("Ids");
LookupTableV2GradCUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
} // namespace operators
} // namespace paddle
|
7c66109d94daa6222733c5eaad4d4f74b1c86581.cu
|
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/lookup_table_v2_op.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
#include "paddle/fluid/platform/float16.h"
namespace paddle {
namespace operators {
template <typename T, typename IdT, bool PaddingFlag>
__global__ void LookupTableV2(T *output, const T *table, const IdT *ids,
const int64_t N, const int64_t K, const int64_t D,
const int64_t padding_idx) {
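// Thread mapping (as laid out below): threadIdx.x strides across the embedding width D,
// while the (blockIdx.x, threadIdx.y) pair forms a grid-stride loop over the K lookup ids
// with stride blockDim.y * gridDim.x, so one row of threads copies one table row at a time.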
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
T *out = output + idy * D;
const T *tab = table + id * D;
for (int i = idx; i < D; i += blockDim.x) {
if (PaddingFlag) {
if (id == padding_idx)
out[i] = static_cast<T>(0);
else
out[i] = tab[i];
} else {
out[i] = tab[i];
}
}
idy += blockDim.y * gridDim.x;
}
}
template <typename T, typename IdT>
__global__ void LookupTableV2Grad(T *table, const T *output, const IdT *ids,
const int64_t N, const int64_t K,
const int64_t D) {
int idx = threadIdx.x;
int idy = blockIdx.x + threadIdx.y * gridDim.x;
while (idy < K) {
auto id = static_cast<int64_t>(ids[idy]);
const T *out = output + idy * D;
T *tab = table + id * D;
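// Different ids may index the same table row, so the per-row gradient
// scatter-add below has to be performed with atomics.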
#ifdef PADDLE_WITH_CUDA
paddle::platform::VectorizedAtomicAddPerBlock(D, idx, blockDim.x, out, tab);
#else
for (int i = idx; i < D; i += blockDim.x) {
paddle::platform::CudaAtomicAdd(&tab[i], out[i]);
}
#endif
idy += blockDim.y * gridDim.x;
}
}
template <typename T>
struct LookupTableV2CUDAFunctor {
LookupTableV2CUDAFunctor(const framework::ExecutionContext &context,
const framework::Tensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto *table_t = context_.Input<framework::Tensor>("W");
auto *output_t = context_.Output<framework::Tensor>("Out");
int64_t padding_idx = context_.Attr<int64_t>("padding_idx");
size_t N = table_t->dims()[0];
size_t D = table_t->dims()[1];
size_t K = ids_t_->numel();
const int gridx = 2 * context_.cuda_device_context().GetSMCount();
dim3 threads(256, 4);
dim3 grids(gridx, 1);
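// Launch configuration: 256 threads along the embedding dimension, 4 id-rows per block,
// and twice as many blocks as SMs so the grid-stride loop keeps every SM occupied.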
const auto *table = table_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
auto *output = output_t->template mutable_data<T>(context_.GetPlace());
auto stream = context_.cuda_device_context().stream();
if (padding_idx == -1) {
LookupTableV2<T, IdT, false><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
} else {
LookupTableV2<T, IdT, true><<<grids, threads, 0, stream>>>(
output, table, ids, N, K, D, padding_idx);
}
}
private:
const framework::ExecutionContext &context_;
const framework::Tensor *ids_t_;
};
template <typename T>
class LookupTableV2CUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<framework::Tensor>("Ids");
LookupTableV2CUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
template <typename InT, typename OutT>
__global__ void InputTypeConvert(const InT *in_ids, const int64_t K,
OutT *out_ids) {
for (int i = 0; i < K; i++) {
out_ids[i] = static_cast<OutT>(in_ids[i]);
}
}
template <typename T>
struct LookupTableV2GradCUDAFunctor {
LookupTableV2GradCUDAFunctor(const framework::ExecutionContext &context,
const framework::Tensor *ids_t)
: context_(context), ids_t_(ids_t) {}
template <typename IdT>
void apply() {
auto &dev_ctx =
context_.template device_context<platform::CUDADeviceContext>();
bool is_sparse = context_.Attr<bool>("is_sparse");
// Since paddings are not trainable and fixed in forward, the gradient of
// paddings makes no sense and we don't deal with it in backward.
if (is_sparse) {
auto *table = context_.Input<framework::Tensor>("W");
auto *d_output =
context_.Input<framework::Tensor>(framework::GradVarName("Out"));
auto *d_table =
context_.Output<phi::SelectedRows>(framework::GradVarName("W"));
const auto *ids_data = ids_t_->template data<IdT>();
int64_t ids_num = ids_t_->numel();
dim3 threads(128, 8);
dim3 grids(8, 1);
auto stream = dev_ctx.stream();
framework::Vector<int64_t> new_rows;
new_rows.resize(ids_num);
auto gpu_place = context_.GetPlace();
paddle::framework::MixVector<int64_t> mixv_new_rows(&new_rows);
if (!std::is_same<IdT, int64_t>::value) {
InputTypeConvert<<<grids, threads, 0, stream>>>(
ids_data, ids_num, mixv_new_rows.MutableData(gpu_place));
} else {
memory::Copy(gpu_place, mixv_new_rows.CUDAMutableData(gpu_place),
gpu_place, ids_data, ids_num * sizeof(int64_t), stream);
}
mixv_new_rows.CopyToCPU();
d_table->set_rows(new_rows);
auto *d_table_value = d_table->mutable_value();
d_table_value->Resize({ids_num, table->dims()[1]});
d_table_value->template mutable_data<T>(gpu_place);
auto *d_table_data = d_table_value->template data<T>();
auto *d_output_data = d_output->template data<T>();
auto d_output_dims = d_output->dims();
auto d_output_dims_2d =
phi::flatten_to_2d(d_output_dims, d_output_dims.size() - 1);
PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output_dims_2d,
platform::errors::InvalidArgument(
"ShapeError: The shape of lookup_table@Grad and "
"output@Grad should be same. "
"But received lookup_table@Grad's shape = [%s], "
"output@Grad's shape = [%s].",
d_table_value->dims(), d_output_dims_2d));
memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data,
d_output->numel() * sizeof(T), stream);
} else {
auto d_output_t =
context_.Input<framework::Tensor>(framework::GradVarName("Out"));
auto d_table_t =
context_.Output<framework::Tensor>(framework::GradVarName("W"));
int N = d_table_t->dims()[0];
int D = d_table_t->dims()[1];
int K = ids_t_->numel();
const T *d_output = d_output_t->template data<T>();
const auto *ids = ids_t_->template data<IdT>();
T *d_table = d_table_t->mutable_data<T>(context_.GetPlace());
#ifdef PADDLE_WITH_HIP
PADDLE_ENFORCE_GPU_SUCCESS(
hipMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#else
PADDLE_ENFORCE_GPU_SUCCESS(
cudaMemsetAsync(d_table, 0, N * D * sizeof(T), dev_ctx.stream()));
#endif
const int gridx = 2 * dev_ctx.GetSMCount();
dim3 threads(128, 8);
dim3 grids(gridx, 1);
LookupTableV2Grad<T, IdT><<<grids, threads, 0, dev_ctx.stream()>>>(
d_table, d_output, ids, N, K, D);
}
}
private:
const framework::ExecutionContext &context_;
const framework::Tensor *ids_t_;
};
template <typename T>
class LookupTableV2GradCUDAKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext &context) const override {
const auto *ids_t = context.Input<framework::Tensor>("Ids");
LookupTableV2GradCUDAFunctor<T> functor(context, ids_t);
framework::VisitIntDataType(framework::TransToProtoVarType(ids_t->dtype()),
functor);
}
};
} // namespace operators
} // namespace paddle
|
d4273717bae873157904953fb2376c084d832b79.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<string>
#include <sstream>
#include<ctime>
#include <chrono>
#include<fstream>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/system/hip/experimental/pinned_allocator.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include<stack>
#include<vector>
#include "frequent_items.h"
#include "projected_database.h"
#include "prefix_span.h"
using namespace std;
fstream _file;
vector < vector<int> > sequential_patterns;
int main() {
int found, cnt = 0, total_row = 0;
string line, temp;
clock_t startt,endt;
thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int>> Hdata;
thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int>> Hstart;
thrust::host_vector<int, thrust::hip::experimental::pinned_allocator<int>> Hend;
thrust::device_vector<int> device_data;
thrust::device_vector<int> start(total_row);
thrust::device_vector<int> end(total_row);
freopen("data.out","w",stdout);
ifstream file;
file.open("MSNBC_SPMF.txt");
if (!file) {
cout << "file not found \n";
return -1;
}
// auto startt = chrono::steady_clock::now();
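// Read the sequence database into a flattened (CSR-like) layout: Hdata holds all item
// ids back to back, and Hstart[i]/Hend[i] give the [begin, end) offsets of sequence i.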
while (getline(file, line))
{
istringstream ss(line);
// int* ptr = raw_pointer_cast(&data[total_row]);
Hstart.push_back(cnt);
while (ss >> temp)
{
if (stringstream(temp) >> found) {
Hdata.push_back(found);
cnt++;
}
}
total_row++;
Hend.push_back(cnt);
}
file.close();
device_data=Hdata;
start=Hstart;
end=Hend;
int* dptr = raw_pointer_cast(&device_data[0]);
int* startPtr = raw_pointer_cast(&start[0]);
int* endPtr = raw_pointer_cast(&end[0]);
startt = clock();
prefix_Span(dptr, startPtr, endPtr, total_row,sequential_patterns);
// auto endt = chrono::steady_clock::now();
// auto diff = endt - startt;
//cout << chrono::duration <double, milli>(diff).count() << " ms" << endl;
endt = clock();
double interval = (double)(endt - startt) / CLOCKS_PER_SEC;
cout<<"cost time : " <<interval<<endl;
for (int i = 0; i < sequential_patterns.size(); i++) {
for (int j = 0; j < sequential_patterns[i].size(); j++) {
cout << sequential_patterns[i][j] << " ";
}
cout << endl;
}
gpuErrchk(hipDeviceReset());
}
|
d4273717bae873157904953fb2376c084d832b79.cu
|
#include<string>
#include <sstream>
#include<ctime>
#include <chrono>
#include<fstream>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/system/cuda/experimental/pinned_allocator.h>
#include <thrust/execution_policy.h>
#include <thrust/copy.h>
#include<stack>
#include<vector>
#include "frequent_items.h"
#include "projected_database.h"
#include "prefix_span.h"
using namespace std;
fstream _file;
vector < vector<int> > sequential_patterns;
int main() {
int found, cnt = 0, total_row = 0;
string line, temp;
clock_t startt,endt;
thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int>> Hdata;
thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int>> Hstart;
thrust::host_vector<int, thrust::cuda::experimental::pinned_allocator<int>> Hend;
thrust::device_vector<int> device_data;
thrust::device_vector<int> start(total_row);
thrust::device_vector<int> end(total_row);
freopen("data.out","w",stdout);
ifstream file;
file.open("MSNBC_SPMF.txt");
if (!file) {
cout << "file not found \n";
return -1;
}
// auto startt = chrono::steady_clock::now();
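// Read the sequence database into a flattened (CSR-like) layout: Hdata holds all item
// ids back to back, and Hstart[i]/Hend[i] give the [begin, end) offsets of sequence i.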
while (getline(file, line))
{
istringstream ss(line);
// int* ptr = raw_pointer_cast(&data[total_row]);
Hstart.push_back(cnt);
while (ss >> temp)
{
if (stringstream(temp) >> found) {
Hdata.push_back(found);
cnt++;
}
}
total_row++;
Hend.push_back(cnt);
}
file.close();
device_data=Hdata;
start=Hstart;
end=Hend;
int* dptr = raw_pointer_cast(&device_data[0]);
int* startPtr = raw_pointer_cast(&start[0]);
int* endPtr = raw_pointer_cast(&end[0]);
startt = clock();
prefix_Span(dptr, startPtr, endPtr, total_row,sequential_patterns);
// auto endt = chrono::steady_clock::now();
// auto diff = endt - startt;
//cout << chrono::duration <double, milli>(diff).count() << " ms" << endl;
endt = clock();
double interval = (double)(endt - startt) / CLOCKS_PER_SEC;
cout<<"cost time : " <<interval<<endl;
for (int i = 0; i < sequential_patterns.size(); i++) {
for (int j = 0; j < sequential_patterns[i].size(); j++) {
cout << sequential_patterns[i][j] << " ";
}
cout << endl;
}
gpuErrchk(cudaDeviceReset());
}
|
gZeus.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gZeus.h"
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: variables in device memory <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
//extern __shared__ int sharedMem[];
__constant__ int NBatch;
__constant__ int NStackDepth;
__constant__ int FixedSplit = 1;
__constant__ int ForwardDetect = 1; //whether to do a forward advancing movement when guiding the photon in
__constant__ int NMaxSplit = 50;
__constant__ int SIMU_ELECTRON = 0;
__constant__ ZFloat EAbsPhoton = GlueF(50e3);
__constant__ ZFloat EAbsElectron = GlueF(50e3);
__constant__ ZFloat ERangeCut = GlueF(10e3);
__constant__ ZFloat EMaxCSDA = GlueF(200e3);
#include "WaterCS.h"
//}}
//{{ pointers to accelerating access in the device
__constant__ ParticleR* InitPars;
__constant__ ParticleR* InitParsA;
__constant__ ParticleR* InitParsB;
__constant__ ParticleR* StackBuff;//memory pointer for stacks
__constant__ GRNG* RNGState;
//}}
//{{ data for phantom
__constant__ int NX, NY, NZ; //voxel number
__constant__ ZFloat DX, DY, DZ; // voxel size, unit cm
__constant__ ZFloat InvDX, InvDY, InvDZ;
__constant__ ZFloat LX, LY, LZ; // side length Lx=DX*NX
__constant__ ZFloat xo, yo, zo;
__constant__ ZFloat MaxDensity;
__constant__ ZFloat Bx, By, Bz; //unit magnetic field direction
__constant__ ZFloat rf;
__constant__ int uniform;
__constant__ SFloat* doseScore; //pointer to dose counter
__constant__ SFloat* ph; //pointer to phantom
Texture1Ddata(SFloat, Phant)
//}}
/*>>>>>>>>>>>>>>>>>>>>>>>>> end: variables in device memory >>>>>>>>>>>>>>>>>>>>>>>>>>>*/
#if __CUDACC_VER_MAJOR__ < 8
//just provide a double-precision version of atomicAdd in case we need it.
__device__ __forceinline__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ void exitKernel(const char inf[])
{
printf("error: %s\n\n", inf);
asm("trap;");
}
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: phantom method definitions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
__device__ ZFloat getDensity(int iabsv)
{
if (uniform) return MaxDensity;
else return ph[iabsv];
//else return getPhant(iabsv);
}
__device__ __forceinline__ ZFloat getDensity(ParticleR& p)
{
return getDensity(p.iabsv);
}
__device__ __forceinline__ void deposit(int iabsv, ZFloat DE)
{
atomicAdd(doseScore + iabsv, DE);
}
__device__ __forceinline__ bool lineInPhantom(ParticleR& p)
{
//first translate the coordinate system
p.x -= xo;
p.y -= yo;
p.z -= zo;
//assuming the incident particle travels in a straight line, judge whether it will enter the phantom.
//if true, give the interaction position
const ZFloat Delta = GlueF(1e-5);
if (p.x < 0 || p.x >= LX || p.y < 0 || p.y >= LY || p.z < 0 || p.z >= LZ) //initial position lies outside the phantom
{
if (p.x < 0)
{
//now it's outside the phantom
if (p.u > 0)
{
ZFloat t = p.x / p.u;
p.x = 0;
p.y -= t*p.v;
p.z -= t*p.w;
if (0 <= p.y &&p.y < LY && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
else if (p.x >= LX)
{
if (p.u < 0)
{
ZFloat t = (LX - Delta - p.x) / p.u;
p.x = LX - Delta;
p.y += t*p.v;
p.z += t*p.w;
if (0 <= p.y &&p.y < LY && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
if (p.y < 0)
{
//now it's outside the phantom
if (p.v > 0)
{
ZFloat t = p.y / p.v;
p.y = 0;
p.x -= p.u*t;
p.z -= p.w*t;
if (0 <= p.x &&p.x < LX && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
else if (p.y >= LY)
{
if (p.v < 0)
{
ZFloat t = (LY - Delta - p.y) / p.v;
p.y = LY - Delta;
p.x += t*p.u;
p.z += t*p.w;
if (0 <= p.x &&p.x < LX && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
if (p.z < 0)
{
//now it's outside the phantom
if (p.w > 0)
{
ZFloat t = p.z / p.w;
p.z = 0;
p.y -= t*p.v;
p.x -= t*p.u;
if (0 <= p.y &&p.y < LY && 0 <= p.x&&p.x < LX) return true;
}
else return false;
}
else if (p.z >= LZ)
{
if (p.w < 0)
{
ZFloat t = (LZ - Delta - p.z) / p.w;
p.z = LZ - Delta;
p.y += t*p.v;
p.x += t*p.u;
if (0 <= p.y &&p.y < LY && 0 <= p.x&&p.x < LX) return true;
}
else return false;
}
}
else return true;
return false;
}
__device__ bool lineIn(ParticleR& p)
{
//if (!lineInPhantom(p)) return false;
if(ForwardDetect) //forward detect the voxel density to skip the air. Is it necessary?
{
const ZFloat step = min(DX, min(DY, DZ));
const int preNum = 1; //how many step it will forwardly detect
const ZFloat dmx = step*p.u;
const ZFloat dmy = step*p.v;
const ZFloat dmz = step*p.w;
while (true)
{
int ix = int((p.x + preNum*dmx) *InvDX);
int iy = int((p.y + preNum*dmy) *InvDY);
int iz = int((p.z + preNum*dmz) *InvDZ);
if (ix < 0 || ix >= NX || iy < 0 || iy >= NY || iz < 0 || iz >= NZ) return false;//it will leave the phantom without scattering
if (getDensity(at(ix, iy, iz)) > GlueF(0.04)) break; //stop when it gets close to the target
//advance the particle
p.x += dmx;
p.y += dmy;
p.z += dmz;
}
}
//prepare the voxel index
p.ivx = int(p.x*InvDX);
p.ivy = int(p.y*InvDY);
p.ivz = int(p.z*InvDZ);
//p.iabsv = at(p.ivx, p.ivy, p.ivz); //this will be recalculated anyway
p.x -= p.ivx*DX;
p.y -= p.ivy*DY;
p.z -= p.ivz*DZ;
return true; //now it's ready to transport
}
__device__ ZFloat tperp(ParticleR& p)
{
ZFloat tx = DX - p.x;
if (p.x < tx) tx = p.x;
ZFloat ty = DY - p.y;
if (p.y < ty) ty = p.y;
ZFloat tz = DZ - p.z;
if (p.z < tz) tz = p.z;
return tx < ty && tx < tz ? tx : ty < tz ? ty : tz; //min of (tx, ty, tz)
}
__device__ bool photonFlight(ParticleR & p, ZFloat ds) //return whether particle leaves the phantom
{
p.x += ds*p.u + p.ivx*DX;
p.y += ds*p.v + p.ivy*DY;
p.z += ds*p.w + p.ivz*DZ;
if (p.x < 0 || p.x >= LX || p.y < 0 || p.y >= LY || p.z < 0 || p.z >= LZ) return true;
//calculate the voxel index
p.ivx = int(p.x*InvDX);
p.ivy = int(p.y*InvDY);
p.ivz = int(p.z*InvDZ);
p.x -= DX*p.ivx;
p.y -= DY*p.ivy;
p.z -= DZ*p.ivz;
p.iabsv = at(p.ivx, p.ivy, p.ivz);
return false;
}
// __device__ __forceinline__ bool intersect(ParticleR &p, ZFloat &step, short& idex, short& dvox)
// {
// bool b_intersect = false;
// if (p.v > 0)
// {
// ZFloat next = DY - p.y;
// if (p.v*step > next)
// {
// step = next / p.v;
// idex = 2;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.v < 0)
// {
// ZFloat next = -p.y;
// if (p.v*step < next)
// {
// step = next / p.v;
// idex = 2;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// if (p.w > 0)
// {
// ZFloat next = DZ - p.z;
// if (p.w*step > next)
// {
// step = next / p.w;
// idex = 3;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.w < 0)
// {
// ZFloat next = -p.z;
// if (p.w*step < next)
// {
// step = next / p.w;
// idex = 3;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// if (p.u > 0)
// {
// ZFloat next = DX - p.x;
// if (p.u*step > next)
// {
// step = next / p.u;
// idex = 1;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.u < 0)
// {
// ZFloat next = -p.x;
// if (p.u*step < next)
// {
// step = next / p.u;
// idex = 1;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// return b_intersect;
// }
//
// __device__ __forceinline__ bool intersect(ParticleR &p, ZFloat &step)
// {
// bool b_intersect = false;
// if (p.v > 0)
// {
// ZFloat next = DY - p.y;
// if (p.v*step > next)
// {
// step = next / p.v;
// b_intersect = true;
// }
// }
// else if (p.v < 0)
// {
// ZFloat next = -p.y;
// if (p.v*step < next)
// {
// step = next / p.v;
// b_intersect = true;
// }
// }
//
// if (p.w > 0)
// {
// ZFloat next = DZ - p.z;
// if (p.w*step > next)
// {
// step = next / p.w;
// b_intersect = true;
// }
// }
// else if (p.w < 0)
// {
// ZFloat next = -p.z;
// if (p.w*step < next)
// {
// step = next / p.w;
// b_intersect = true;
// }
// }
//
// if (p.u > 0)
// {
// ZFloat next = DX - p.x;
// if (p.u*step > next)
// {
// step = next / p.u;
// b_intersect = true;
// }
// }
// else if (p.u < 0)
// {
// ZFloat next = -p.x;
// if (p.u*step < next)
// {
// step = next / p.u;
// b_intersect = true;
// }
// }
//
// return b_intersect;
// }
__device__ __forceinline__ bool chvox(ParticleR &p, ZFloat &step, short& idex, short& dvox)
{
switch (idex)
{
case 3:
if (dvox > 0)
{
++p.ivz;
if (p.ivz >= NZ) return false;
p.z = 0;
}
else
{
--p.ivz;
if (p.ivz < 0) return false;
p.z = DZ;
}
p.x += p.u*step;
p.y += p.v*step;
break;
case 2:
if (dvox > 0)
{
++p.ivy;
if (p.ivy >= NY) return false;
p.y = 0;
}
else
{
--p.ivy;
if (p.ivy < 0) return false;
p.y = DY;
}
p.x += p.u*step;
p.z += p.w*step;
break;
default:
if (dvox > 0)
{
++p.ivx;
if (p.ivx >= NX) return false;
p.x = 0;
}
else
{
--p.ivx;
if (p.ivx < 0) return false;
p.x = DX;
}
p.y += p.v*step;
p.z += p.w*step;
}
p.iabsv = at(p.ivx, p.ivy, p.ivz);
return true;
}
__device__ int electronFreeFlight(ParticleR& p, ZFloat Eend)
{
ZFloat voxden = getDensity(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
ZFloat finalRange = Eend > ERangeCut ? (Eend - ERangeCut)*WaterRangeCS(Eend) : 0;
while (true)
{
ZFloat step = (range - finalRange) / voxden;
//check if it intersect with the boundary of current voxel
short idex, dvox; //idex = 1,2,3 means x,y,z direction; dvox = +1, -1 means moving in positive or negative direction
bool intersect = false;
if (p.v > 0)
{
ZFloat next = DY - p.y;
if (p.v*step > next)
{
step = next / p.v; idex = 2; dvox = 1; intersect = true;
}
}
else if (p.v < 0)
{
ZFloat next = -p.y;
if (p.v*step < next)
{
step = next / p.v; idex = 2; dvox = -1; intersect = true;
}
}
if (p.w > 0)
{
ZFloat next = DZ - p.z;
if (p.w*step > next)
{
step = next / p.w; idex = 3; dvox = 1; intersect = true;
}
}
else if (p.w < 0)
{
ZFloat next = -p.z;
if (p.w*step < next)
{
step = next / p.w; idex = 3; dvox = -1; intersect = true;
}
}
if (p.u > 0)
{
ZFloat next = DX - p.x;
if (p.u*step > next)
{
step = next / p.u; idex = 1; dvox = 1; intersect = true;
}
}
else if (p.u < 0)
{
ZFloat next = -p.x;
if (p.u*step < next)
{
step = next / p.u; idex = 1; dvox = -1; intersect = true;
}
}
ZFloat newEnergy = Eend;
if (intersect)
{
range -= step*voxden;
newEnergy = WaterInverseRangeCS(range);
if (newEnergy < ERangeCut) newEnergy = 0;
}
deposit(p.iabsv, (p.E - newEnergy)*p.weight);
//update the energy to the new value
p.E = newEnergy;
if (p.E < ERangeCut) return 1; // if this is the final step to local absorption, we don't need to update the position and the direction
//move the electron
p.x += p.u*step;
p.y += p.v*step;
p.z += p.w*step;
if (!intersect) break;
if (2 == idex) //y direction
{
p.ivy += dvox;
if (dvox > 0)
{
if (p.ivy >= NY) return 1;
p.y = 0;
}
else
{
if (p.ivy < 0) return 1;
p.y = DY;
}
}
else if (3 == idex) //z direction
{
p.ivz += dvox;
if (dvox > 0)
{
if (p.ivz >= NZ) return 1;
p.z = 0;
}
else
{
if (p.ivz < 0) return 1;
p.z = DZ;
}
}
else //x direction
{
p.ivx += dvox;
if (dvox > 0)
{
if (p.ivx >= NX) return 1;
p.x = 0;
}
else
{
if (p.ivx < 0) return 1;
p.x = DX;
}
}
p.iabsv = at(p.ivx, p.ivy, p.ivz);
voxden = getDensity(p);
}
return 0;
}
__device__ int electronFlight(ParticleR& p, ZFloat Eend)
{
if (rf == 0) return electronFreeFlight(p, Eend);
//move for electron/photon. Note that coordinates are relative to each voxel
const ZFloat deltax = GlueF(0.01);
ZFloat voxden = getDensity(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
ZFloat finalRange = Eend > ERangeCut ? (Eend - ERangeCut)*WaterRangeCS(Eend) : 0;
ZFloat e = GlueF(0.5)*(p.E + Eend);
if (e < EAbsElectron) e = EAbsElectron; // limit to Eabs
//ZFloat uwx = Bx;
//ZFloat uwy = By;
//ZFloat uwz = Bz;
ZFloat Rb = rf*GlueF(sqrt)(e*(e + TEs));
ZFloat Rbi = 1 / Rb; //in case this value being used many times
ZFloat maxStep = GlueF(sqrt)(2 * Rb*deltax); //max allowed distance to move to ensure accuracy
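// Step limit from the curvature: for gyration radius Rb the lateral deviation of the
// curved path from a straight step of length s is roughly s*s/(2*Rb), so requiring it
// to stay below deltax gives maxStep = sqrt(2*Rb*deltax).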
while (true)
{
ZFloat step = (range - finalRange) / voxden;
bool finalStep = true;
if (step > maxStep)
{
step = maxStep;
finalStep = false;
}
//check if it intersect with the boundary of current voxel
int idex, dvox; //idex = 1,2,3 means x,y,z direction; dvox = +1, -1 means moving in positive or negative direction
bool intersect = false;
if (p.v > 0)
{
ZFloat next = DY - p.y;
if (p.v*step > next)
{
step = next / p.v; idex = 2; dvox = 1; intersect = true;
}
}
else if (p.v < 0)
{
ZFloat next = -p.y;
if (p.v*step < next)
{
step = next / p.v; idex = 2; dvox = -1; intersect = true;
}
}
if (p.w > 0)
{
ZFloat next = DZ - p.z;
if (p.w*step > next)
{
step = next / p.w; idex = 3; dvox = 1; intersect = true;
}
}
else if (p.w < 0)
{
ZFloat next = -p.z;
if (p.w*step < next)
{
step = next / p.w; idex = 3; dvox = -1; intersect = true;
}
}
if (p.u > 0)
{
ZFloat next = DX - p.x;
if (p.u*step > next)
{
step = next / p.u; idex = 1; dvox = 1; intersect = true;
}
}
else if (p.u < 0)
{
ZFloat next = -p.x;
if (p.u*step < next)
{
step = next / p.u; idex = 1; dvox = -1; intersect = true;
}
}
if (intersect) finalStep = false;
ZFloat newEnergy = Eend;
if (!finalStep)
{
range -= step*voxden;
newEnergy = WaterInverseRangeCS(range);
if (newEnergy < ERangeCut) newEnergy = 0;
}
deposit(p.iabsv, (p.E - newEnergy)*p.weight);
//update the energy to the new value
p.E = newEnergy;
if (p.E < ERangeCut) return 1; // if this is the final step to local absorption, we don't need to update the position and the direction
//move the electron/positron
p.x += p.u*step;
p.y += p.v*step;
p.z += p.w*step;
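// Rotate the direction by angle step/Rb about the magnetic field axis (Bx,By,Bz):
// decompose v into its component along B, the perpendicular component, and v x B,
// then recombine them with the cos/sin factors f1 and f2 computed below.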
ZFloat vuw = p.u*Bx + p.v*By + p.w*Bz;
ZFloat vperpx = p.u - vuw * Bx,
vperpy = p.v - vuw * By,
vperpz = p.w - vuw * Bz;
ZFloat vxwx = vperpy*Bz - vperpz*By,
vxwy = vperpz*Bx - vperpx*Bz,
vxwz = vperpx*By - vperpy*Bx;
// The step-length dependent variables f1 & f2
ZFloat f1, f2;
ZFloat arg = step * Rbi;
// if (arg < GlueF(0.2))
// {
// // arg is small, so use power series expansion of sine and cosine
// ZFloat arg2 = arg*arg;
// f1 = -GlueF(0.5)*arg2 + GlueF(0.0416666667)*arg2*arg2; // for 0.2, relative error is 2.2e-6
// f2 = arg - GlueF(0.16666667)*arg*arg2; // for 0.2, relative error is 1.3e-5, absolute error is 2.6e-6
// }
// else
/* {*/
f1 = GlueF(cos)(arg)-1;
f2 = GlueF(sin)(arg);
/* }*/
// Direction change
ZFloat dvx = f1*vperpx - f2*vxwx; // would simplify to f1*_v.x - f2*_v.y;
ZFloat dvy = f1*vperpy - f2*vxwy; // would simplify to f1*_v.y + f2*_v.x;
ZFloat dvz = f1*vperpz - f2*vxwz; // would simplify to 0 (i.e., component along the magnetic field remains constant).
//update the direction
p.u += dvx;
p.v += dvy;
p.w += dvz;
if (finalStep)
{
ZFloat tp = tperp(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
if (range < tp*voxden) // can deposit without further simulation
{
deposit(p.iabsv, p.E*p.weight);
return 1; //end the simulation of this electron
}
return 0;
}
//not the final step, we may need to check the direction
if (intersect)
{
//
// We are entering a new voxel. But because we are also changing the direction, we need to verify that the direction has not changed
// in a way that we are actually coming back into the voxel from which we came. The condition for coming back into the same voxel
// is that v*(v + dv) < 0, where v and dv are the direction and the direction change of the component crossing the voxel boundary
//
switch (idex)
{
case 3:
if (p.w*(p.w - dvz) >= 0) //enter a new voxel
{
p.ivz += dvox;
if (dvox > 0) {
if (p.ivz >= NZ) return 1;
p.z = 0;
}
else {
if (p.ivz < 0) return 1;
p.z = DZ;
}
}
else intersect = false;
break;
case 2:
if (p.v*(p.v - dvy) >= 0)
{
p.ivy += dvox;
if (dvox > 0) {
if (p.ivy >= NY) return 1;
p.y = 0;
}
else {
if (p.ivy < 0) return 1;
p.y = DY;
}
}
else intersect = false;
break;
default:
if (p.u*(p.u - dvx) >= 0)
{
p.ivx += dvox;
if (dvox > 0) {
if (p.ivx >= NX) return 1;
p.x = 0;
}
else {
if (p.ivx < 0) return 1;
p.x = DX;
}
}
else intersect = false;
}
}// end if (intersect)
//still intersect after the direction check, need to update the voxel density and index
// if (intersect) // in GPU, just update all even if some are not necessary
// {
p.iabsv = at(p.ivx, p.ivy, p.ivz);
voxden = getDensity(p);
/* }*/
// //update direction and keep going
// p.u += dvx;
// p.v += dvy;
// p.w += dvz;
}
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>>>>> end: phantom method definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: Kernel definitions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
__global__ void initThreads(int cardOffset)//init the random number generator, call it before any run
{
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
RNGState[it].init(cardOffset + it);
}
__device__ __forceinline__ bool refill(size_t it, ParticleR* pInit, ParticleStack& pStack, ParticleR& p, int& cur)
{
if (pStack.empty())
{
while (cur < NBatch)
{
p = pInit[it*NBatch + cur];
++cur;
if (!lineIn(p)) continue;
else return true;
}
return false;
}
else
{
p = pStack.top(); //it must be already in the phantom
pStack.pop();
return true;
}
}
__device__ void Rotate(ZFloat& ux, ZFloat& uy, ZFloat& uz, ZFloat costh, ZFloat cosph, ZFloat sinph) {
ZFloat costh2 = costh*costh;
ZFloat rho2 = ux * ux + uy * uy;
if (rho2 > 0 && costh2 < 1) {
ZFloat a = GlueF(sqrt)((1 - costh2) / rho2);
ZFloat xrho = ux * a;
ZFloat yrho = uy * a;
ZFloat ztmp = uz * cosph;
ux = ux * costh - yrho * sinph + ztmp * xrho;
uy = uy * costh + xrho * sinph + ztmp * yrho;
uz = uz * costh - rho2 * a * cosph;
}
else {
if (costh2 >= 1) {
if (costh < 0) {
ux = -ux; uy = -uy; uz = -uz;
}
return;
}
ZFloat b = GlueF(sqrt)(1 - costh2);
uy = b * sinph;
if (uz > 0) {
ux = b * cosph;
uz = costh;
}
else {
ux = -b * cosph;
uz = -costh;
}
}
};
// __device__ void Rotate(ZFloat& ux, ZFloat& uy, ZFloat& uz, ZFloat costh, GRNG& rng) //rotate with a random phi
// {
// ZFloat phi = 2 * PI*rng();
// ZFloat cphiCompt = GlueF(cos)(phi);
// ZFloat sphiCompt = (phi < PI ? 1 : -1)*GlueF(sqrt)(1 - cphiCompt*cphiCompt);
// }
__device__ void samcom(ZFloat energy, ZFloat &efrac, ZFloat &costh, GRNG& rng)
{
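// Sample the Compton scattered-photon energy fraction br by rejection, and return
// cos(theta) through the Compton relation 1 - costh = (1 - br)/(ko*br); the two
// branches below use different proposal distributions for low and high photon energy.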
ZFloat ko = energy*INV_ELECTRON_MASS;
ZFloat broi = 1 + 2 * ko; ZFloat bro = 1 / broi;
ZFloat br, temp;
if (ko < 10) {
// "low" energy case: uniformly between bro and bro1.
ZFloat bro1 = 1 - bro;
ZFloat ko2 = ko*ko;
ZFloat rejmax = ko2*(broi + bro);
ZFloat br2;
do {
br = bro + bro1*rng(); br2 = br*br;
} while (rng()*br2*rejmax > ko2*br*(br2 + 1) - (1 - br)*(br*broi - 1));
temp = (1 - br) / (ko*br);
}
else {
// "high" energy case: the usual way
ZFloat broi2 = broi*broi;
ZFloat alpha1 = GlueF(log)(broi);
ZFloat alpha2 = ko*(broi + 1)*bro*bro;
ZFloat alphaS = alpha1 + alpha2;
ZFloat sint;
do {
br = rng()*alphaS < alpha1 ? GlueF(exp)(alpha1*rng())*bro : GlueF(sqrt)(rng()*(broi2 - 1) + 1)*bro;
temp = (1 - br) / (ko*br); sint = temp*(2 - temp);
} while (rng()*(1 + br*br) < br*sint);
}
efrac = br;
costh = 1 - temp;
}
__device__ ZFloat samsca(ZFloat e, GRNG& rng)
{
// The screening parameter at this energy.
ZFloat ie = -1 / e;
ZFloat b = WaterScreenCS(ie);
ZFloat oneb = GlueF(1.0) + b;
// Determine energy bin of the pre-computed q surface.
// Instead of linearly interpolating between energies in each qsurf() evaluation below, we
// use the lower or higher energy of the bin in which the energy falls with the corresponding probability
int je; ZFloat pe;
WaterQSGetEnergyIndex1(ie, je, pe);
if (pe > 0)
{
if (rng() < pe) ++je;
}
ZFloat u;
while (1)
{
u = rng();//u is in [0, 1)
if (rng() < WaterQSurface(je, u)) break;
}
ZFloat mu = (oneb - u*(oneb + b)) / (oneb - u); // oneb>=1.0, so (oneb-u) is always positive
return mu;
}
__device__ void electronRun(ParticleR& p, GRNG& rng)
{
ZFloat fuelxt = 0, ebefor = 0, nextEnergy = 0;
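// Energy bookkeeping (as implemented below): each cycle draws a random fraction of
// Eloss = min(E, EMaxCSDA) as the energy spent in this condensed-history step, and
// fuelxt carries the unspent remainder of Eloss over to the next cycle.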
while (true)
{
ebefor = p.E - fuelxt;
if (ebefor > EAbsElectron)
{
ZFloat Eloss = min(ebefor, EMaxCSDA);
ZFloat fuel = Eloss*rng();
nextEnergy = p.E - fuelxt - fuel;
if (nextEnergy <= ERangeCut) nextEnergy = 0;
fuelxt = Eloss - fuel;
}
else
{
if (p.E < ERangeCut) return;
nextEnergy = 0;
}
int status = electronFlight(p, nextEnergy);
if (status == 1) return; //below the cut-off energy or exit the phantom
// Check if we can discontinue transport because the electron cannot escape the current voxel
// ZFloat tp = tperp(p);
// ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
// if (range < tp*getDensity(p))
// {
// deposit(p.iabsv, p.E*p.weight);
// return; //end the simulation of this electron
// }
//do elastic multi-scattering
ZFloat costhe = samsca(ebefor, rng);
if (costhe < 1)
{
ZFloat cphi, sphi;
randomAzimuth(rng(), cphi, sphi);
Rotate(p.u, p.v, p.w, costhe, cphi, sphi);
}
}
}
__device__ void photonRun(ParticleR& p, GRNG& rng, int& hasSec)
{
//this version put electron in stack and simulate later
while (true) //break until this photon is finished
{
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
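// Woodcock (delta) tracking: sample the free flight with the minimum mean free path
// lammin, i.e. as if the whole phantom had MaxDensity; the candidate interaction is
// then accepted as real with probability lamden*lamph = density/MaxDensity, otherwise
// it is a null collision and the photon keeps flying.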
ZFloat s = -lammin*GlueF(log)(rng());
if (photonFlight(p, s)) return; //finish simulating this photon
ZFloat lamden = lammin * getDensity(p);
ZFloat rn = rng();
if (rn < lamden*lamph)
{
ZFloat lamco = WaterComptonCS(p.E);
if (rn < lamden * lamco)// It's a Compton interaction
{
ParticleR pe = p; // back the status of this photon
ZFloat efracCompt = 1, costheCompt, cphiCompt, sphiCompt;
samcom(p.E, efracCompt, costheCompt, rng);
randomAzimuth(rng(), cphiCompt, sphiCompt);
// Electron energy
efracCompt = 1 - efracCompt;
pe.E = p.E*efracCompt;
// Compute Compton electron direction
ZFloat e0 = p.E * INV_ELECTRON_MASS;
ZFloat cost;
if (efracCompt > FLTEPSILON)
{
cost = (1 + e0) * GlueF(sqrt)(efracCompt / (e0*(2.0 + e0*efracCompt)));
if (cost > 1) cost = 1;
}
else cost = 0;
Rotate(pe.u, pe.v, pe.w, cost, -cphiCompt, -sphiCompt);
p.E -= pe.E; // the scattered photon's energy
electronRun(pe, rng);
if (p.E > EAbsPhoton)
{
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
}
else return;
}
else //should be pair production or photoelectric absorption
{
bool doPair = p.E > TEs; //only possible to do pair production
if (doPair)
{
ZFloat lampair = WaterPairCS(p.E);
if (rn > lamden*(lamco + lampair)) doPair = false;
}
if (doPair) //it's a pair production
{
ParticleR pe(p);
pe.type = electron;
ZFloat Epair1 = rng() * (p.E - TEs);
pe.E = Epair1;
electronRun(pe, rng);
pe.E = p.E - TEs - Epair1;
pe.copyDirection(p);
pe.copyPosition(p);
electronRun(pe, rng);
p.w = 2 * rng() - 1;
ZFloat sinthe = GlueF(sqrt)(1.0 - p.w*p.w);
ZFloat phi = 2 * PI * rng();
p.u = sinthe * GlueF(cos)(phi);
p.v = sinthe * GlueF(sin)(phi);
p.E = Es; //annihilation photon energy (Es)
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
StackBuff[NStackDepth*it] = p; //since the event is rare, we can use slow global memory
hasSec = 1;
//The other photon has opposite direction
p.u = -p.u;
p.v = -p.v;
p.w = -p.w;
}
else //it's a photoelectric absorption
{
p.type = electron;
electronRun(p, rng);
return;//finish simulating this photon
}
}
}
//else //reject the scattering and continue next move
}
}
__device__ void smartPhotonRun(ParticleR& p, GRNG& rng, int& hasSec)
{
int nsplit = NMaxSplit;
while (true)
{
if (!FixedSplit)
{
//splitting number
ZFloat e = p.E*GlueF(1e-6);
ZFloat e2 = e*e;
ZFloat pInitial = e > GlueF(0.3) ? GlueF(-0.053139) + GlueF(1.0695) * e - GlueF(0.24783) * e2 + GlueF(0.028566) * e*e2 : GlueF(0.25);
nsplit = (int)(pInitial*NMaxSplit);
}
ZFloat rnno1 = rng();
ZFloat rnno2 = rng();
ZFloat delta = 1 / ZFloat(nsplit);
ZFloat eweight = p.weight*delta;
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = -1;
ZFloat lampair = -1;
int keepID = (int)(rng()*nsplit);
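// Variance reduction by photon splitting: the flight is replayed nsplit times with
// stratified path lengths and per-copy weight eweight = weight/nsplit; secondaries are
// transported for every copy, but only the copy with index keepID survives as the
// continuing (scattered) photon.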
// These will be used to remember the relevant Compton scattering variables
ZFloat eCompElec = -1;
ZFloat eu = p.u, ev = p.v, ew = p.w;
ZFloat costheCompt, cphiCompt, sphiCompt; //used to calculate the direction of the scattered photon
ZFloat Epair1 = -1; // remember the pair production energy
ParticleR pOld = p; // remember the initial status
p.E = 0; //default to exit
for (int isplit = 0; isplit < nsplit; ++isplit)
{
ParticleR pi = pOld; //start with the initial status
ZFloat rnnoi = 1 - delta*(rnno1 + isplit);
if (rnnoi <= 0) break;
//ZFloat s = lammin*lambdaCalculator(rnnoi);
ZFloat s = -lammin*GlueF(log)(rnnoi);
if (photonFlight(pi, s)) break;
// update lammin with density in the voxel and get material id.
ZFloat lamden = lammin * getDensity(pi);
if (lamph < 0) lamph = WaterPhotonCS(pi.E); // it's so weird that this statement improved the performance
// Check if a real interaction
if (rnno2 < lamden*lamph) // yes.
{
if (lamco < 0) lamco = WaterComptonCS(pi.E);
if (rnno2 < lamden * lamco) // It's a Compton interaction
{
if (eCompElec < 0) // Haven't sampled a Compton interaction yet, so do it now.
{
//keepCompton = (int)(rng()*nsplit);
// Sample the interaction
ZFloat efracCompt = 1;
samcom(pi.E, efracCompt, costheCompt, rng);
randomAzimuth(rng(), cphiCompt, sphiCompt);
// Electron energy
efracCompt = 1 - efracCompt;
eCompElec = pi.E*efracCompt;
// Compute Compton electron direction
ZFloat e0 = pi.E * INV_ELECTRON_MASS;
//ZFloat efrac1 = 1 - efracCompt;
ZFloat cost;
if (efracCompt > FLTEPSILON)
{
cost = (1 + e0) * GlueF(sqrt)(efracCompt / (e0*(2.0 + e0*efracCompt)));
if (cost > 1) cost = 1;
}
else cost = 0;
Rotate(eu, ev, ew, cost, -cphiCompt, -sphiCompt);
}
//if (isplit == keepCompton && eCompGamma > EAbsPhoton)
if (isplit == keepID)
{
ZFloat eCompGamma = pi.E - eCompElec;
p.E = eCompGamma;
if (eCompGamma > EAbsPhoton)
{
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
// Now, instead of first pushing the Compton electron onto the stack and later getting it back, we simply transport it here.
pi.type = electron;
pi.E = eCompElec;
pi.weight = eweight;
pi.u = eu;
pi.v = ev;
pi.w = ew;
electronRun(pi, rng);
}
else // Not a Compton, so check if pair or photo.
{
bool doPair = pi.E > TEs;
if (doPair)
{
if (lampair < 0) lampair = WaterPairCS(pi.E);
if (rnno2 > lamden*(lamco + lampair)) doPair = false;
}
if (doPair) // It's a pair production -> the photon disappears (but we add annihilation photons below as needed).
{
if (Epair1 < 0) // Haven't sampled a pair event yet, so do it now.
{
Epair1 = rng() * (pi.E - TEs);
}
if (isplit == keepID)
{
p.w = 2 * rng() - 1;
ZFloat sinthe = GlueF(sqrt)(1.0 - p.w*p.w);
ZFloat phi = 2 * PI * rng();
p.u = sinthe * GlueF(cos)(phi);
p.v = sinthe * GlueF(sin)(phi);
p.E = Es; //annihilation photon energy (Es)
//copy the position
p.x = pi.x; p.y = pi.y; p.z = pi.z;
p.ivx = pi.ivx; p.ivy = pi.ivy; p.ivz = pi.ivz; p.iabsv = pi.iabsv;
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
StackBuff[NStackDepth*it] = p; //since the event is rare, we can use slow global memory
hasSec = 1;
//The other photon has opposite direction
p.u = -p.u;
p.v = -p.v;
p.w = -p.w;
}
// ParticleR* pars = (ParticleR*)sharedMem;
// ParticleR& pb = pars[threadIdx.x]; //current particle to process
// pb = pi;
// Put an e+/e- pair on the stack. We do not distinguish between electrons and positrons at this point.
ZFloat bx = pi.x; ZFloat by = pi.y; ZFloat bz = pi.z; //backup position
int bix = pi.ivx; int biy = pi.ivy; int biz = pi.ivz; int biabs = pi.iabsv;
pi.type = electron;
pi.E = Epair1;
pi.weight = eweight;
electronRun(pi, rng);
//restore the position, direction
pi.x = bx; pi.y = by; pi.z = bz;
pi.ivx = bix; pi.ivy = biy; pi.ivz = biz; pi.iabsv = biabs;
pi.u = pOld.u; pi.v = pOld.v; pi.w = pOld.w;
pi.E = pOld.E - TEs - Epair1;
// pi = pb;
// pi.E = pOld.E - TEs - Epair1;
electronRun(pi, rng);
}
else
{
// It's a photo absorption -> the photon disappears
pi.type = electron;
pi.weight = eweight;
electronRun(pi, rng);
}
}
}
else // The interaction was rejected.
{
if (isplit == keepID) //copy the position, and energy
{
p.x = pi.x; p.y = pi.y; p.z = pi.z;
p.ivx = pi.ivx; p.ivy = pi.ivy; p.ivz = pi.ivz; p.iabsv = pi.iabsv;
p.E = pi.E;
}
}
}
if (p.E < EAbsPhoton) return;
}
}
__device__ void ComptonPhotonRun(ParticleR& p, GRNG &rng)
{
while (true)
{
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = WaterComptonCS(p.E);
ZFloat s = -lammin*GlueF(log)(rng());
if (photonFlight(p, s)) return;
// update lammin with density in the voxel and get material id.
ZFloat lamden = lammin * getDensity(p);
ZFloat rnno = rng();
// Check if a real interaction
if (rnno < lamden*lamph) // yes.
{
if (rnno < lamden * lamco) // It's a Compton interaction
{
ZFloat efracCompt, costheCompt;
samcom(p.E, efracCompt, costheCompt, rng);
//deposit the energy of electron
ZFloat eCompGamma = efracCompt*p.E;
deposit(p.iabsv, (p.E - eCompGamma)*p.weight);
if (eCompGamma > EAbsPhoton)
{
ZFloat cphiCompt, sphiCompt;
randomAzimuth(rng(), cphiCompt, sphiCompt);
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
p.E = eCompGamma;
}
else
{
//deposit(p.iabsv, eCompGamma*p.weight);
return;
}
}
else // Not a Compton, deposit all energy here
{
deposit(p.iabsv, p.E*p.weight);
return;
}
}
}
}
__device__ void SmartComptonPhotonRun(ParticleR& p, GRNG &rng)
{
int nsplit = NMaxSplit;
while (true)
{
if (!FixedSplit)
{
//splitting number
ZFloat e = p.E*GlueF(1e-6);
ZFloat e2 = e*e;
ZFloat pInitial = e > GlueF(0.3) ? GlueF(-0.053139) + GlueF(1.0695) * e - GlueF(0.24783) * e2 + GlueF(0.028566) * e*e2 : GlueF(0.25);
nsplit = (int)(pInitial*NMaxSplit);
}
ZFloat rnno1 = rng();
ZFloat rnno2 = rng();
ZFloat delta = 1 / ZFloat(nsplit);
ZFloat eweight = p.weight*delta;
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = WaterComptonCS(p.E);
int keepID = (int)(rng()*nsplit);
// These will be used to remember the relevant Compton scattering variables
ZFloat costheCompt = 0, eCompElec = -1;
//remember initial position
ZFloat px = p.x, py = p.y, pz = p.z;
int pivx = p.ivx, pivy = p.ivy, pivz = p.ivz;
ParticleR pi = p; //the direction of pi will not change
p.E = 0; //default to lose all energy and exit
for (int isplit = 0; isplit < nsplit; ++isplit)
{
//reset the position
pi.x = px;
pi.y = py;
pi.z = pz;
pi.ivx = pivx;
pi.ivy = pivy;
pi.ivz = pivz;
ZFloat rnnoi = 1 - delta*(1 - rnno1 + isplit);
//ZFloat s = lammin*lambdaCalculator(rnnoi); //this implementation is slower than calling log directly
ZFloat s = -lammin*GlueF(log)(rnnoi);
if (photonFlight(pi, s)) break;
ZFloat lamden = lammin*getDensity(pi);
// Check if a real interaction
if (rnno2 < lamden*lamph) // yes.
{
if (rnno2 < lamden * lamco) // It's a Compton interaction
{
if (eCompElec < 0) // Haven't sampled a Compton interaction yet, so do it now.
{
ZFloat efracCompt = 1;
samcom(pi.E, efracCompt, costheCompt, rng);
eCompElec = pi.E*(1 - efracCompt);
}
if (isplit == keepID)
{
p.E = pi.E - eCompElec;
if (p.E > EAbsPhoton)
{
ZFloat cphiCompt, sphiCompt;
randomAzimuth(rng(), cphiCompt, sphiCompt);
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
//copy position
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
deposit(pi.iabsv, eCompElec*eweight); //deposition the energy of the electron
}
else // Not a Compton, deposit all energy here
{
deposit(pi.iabsv, pi.E*eweight);
}
}
else // The interaction was rejected.
{
if (isplit == keepID)
{
//copy energy and position. Direction is actually unchanged
p.E = pi.E;
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
}
if (p.E < EAbsPhoton)
{
//if (p.E > 0) deposit(p.iabsv, p.E*p.weight);
return;
}
}
}
__global__ void
//__launch_bounds__(128, 16)
gZeusSmartRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
//ParticleR* pars = (ParticleR*)sharedMem;
//ParticleR& p = pars[threadIdx.x]; //current particle to process
ParticleR p; //current particle to process
int hasSec = 0;
int cur = 0;//reset the current particle index
int NT = blockDim.x * gridDim.x;
while (true)
{
//first try to fetch particle from particle stack, and then try to fetch from the particle buffer
if (hasSec)
{
p = StackBuff[NStackDepth*it];
hasSec = 0;
}
else //fetch from the buffer
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[cur*NT + it];
//p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
}
smartPhotonRun(p, rng, hasSec);
}
}
__global__ void gZeusRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
//ParticleR* pars = (ParticleR*)sharedMem;
//ParticleR& p = pars[threadIdx.x]; //current particle to process
ParticleR p; //current particle to process
int hasSec = 0;
int cur = 0;//reset the current particle index
while (true)
{
//first try to fetch particle from particle stack, and then try to fetch from the particle buffer
if (hasSec)
{
p = StackBuff[NStackDepth*it];
hasSec = 0;
}
else //fetch from the buffer
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
}
photonRun(p, rng, hasSec);
}
}
__global__ void gZeusComptonRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
ParticleR p; //current particle to process
int cur = 0;//reset the current particle index
while (true)
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
ComptonPhotonRun(p, rng);
}
}
__global__ void gZeusSmartComptonRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
ParticleR p; //current particle to process
int cur = 0;//reset the current particle index
int NT = blockDim.x * gridDim.x;
while (true)
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[cur*NT + it];
++cur;
if (lineIn(p)) break;
}
SmartComptonPhotonRun(p, rng);
}
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>>>>> end: Kernel definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<< start: tool functions of cuda <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
void cudaErrorCheck(hipError_t cudaStatus,const char* func)
{
if (hipSuccess != cudaStatus)
{
if (func) Log("cuda error: %s in function module-> %s\n", hipGetErrorString(cudaStatus), func);
else Log("cuda error: %s\n", hipGetErrorString(cudaStatus));
exitApp("cuda function call failed!");
}
}
void inline cudaKernelCheck(int i, int it)
{
#ifdef CUDA_KERNEL_CHECK
#ifdef DEBUG
hipDeviceSynchronize();//make sure all kernels are finished
hipError_t cudaStatus = hipGetLastError();
if (hipSuccess != cudaStatus)
{
if (it != -1) Log("thread id %d with cuda kernel number %d error: %s", it, i, hipGetErrorString(cudaStatus));
else Log("cuda kernel number %d error: %s", i, hipGetErrorString(cudaStatus));
exitApp("cuda execuation error in executeJob()");
}
#endif
#endif
}
int ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM
typedef struct
{
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the values, we default to the previous one to run properly
printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
int cudaGetMaxGflopsDeviceID(vector<GPUConfig>& gc) // This function returns the best GPU (with maximum GFLOPS)
{
int current_device = 0, sm_per_multiproc = 0;
int best_SM_arch = 0;
int devices_prohibited = 0;
unsigned long long max_compute_perf = 0;
hipDeviceProp_t deviceProp;
// Find the best major SM Architecture GPU device
for (unsigned int i = 0; i < gc.size(); ++i)
{
current_device = gc[i].id;
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != hipComputeModeProhibited)
{
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = max(best_SM_arch, deviceProp.major);
}
}
else
{
devices_prohibited++;
}
}
// Find the best CUDA capable GPU device
int gc_i = 0;
for (unsigned int i = 0; i < gc.size(); ++i)
{
current_device = gc[i].id;
hipGetDeviceProperties(&deviceProp, current_device);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != hipComputeModeProhibited)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf)
{
// If we find GPU with SM major > 2, search only these
if (best_SM_arch > 2)
{
// If our device == best_SM_arch, choose this one; otherwise skip it
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
gc_i = i;
}
}
else
{
max_compute_perf = compute_perf;
gc_i = i;
}
}
else if (compute_perf == max_compute_perf)
{
if (gc[gc_i].id == 0) gc_i = i; //if two GPUs have identical estimated flops, avoid device 0 because the OS uses it preferentially
}
}
}
return gc_i; // max_perf_device;
}
bool speedCompareFunc(pair<unsigned long long, int> lhs, pair<unsigned long long, int> rhs)
{
return lhs.first > rhs.first;
}
void cudaGetGflopsList(vector<int>& speedList) // This function fills speedList with GPU indices sorted by estimated GFLOPS (fastest first)
{
hipDeviceProp_t deviceProp;
int devCount = 0;
cudaErrorCheck(hipGetDeviceCount(&devCount));
if (devCount < 1) exitApp("Cannot find any CUDA-capable GPU on your computer!");
vector<pair<unsigned long long, int>> speed;
// Find the best major SM Architecture GPU device
for (int i = 0; i < devCount; ++i)
{
hipGetDeviceProperties(&deviceProp, i);
// If this GPU is not running on Compute Mode prohibited, then we can add it to the list
if (deviceProp.computeMode != hipComputeModeProhibited)
{
int sm_per_multiproc = 1;
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
speed.push_back(make_pair(compute_perf, i));
}
}
sort(speed.begin(), speed.end(), speedCompareFunc);
int ng = (int)speed.size();
speedList.resize(ng);
for (int i = 0; i < ng; ++i) speedList[i] = speed[i].second;
}
void printGPUProperties(int i)
{
// Get device properties
Log("\nCUDA Device #%d\n", i);
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, i);
Log("Major revision number: %d\n", devProp.major);
Log("Minor revision number: %d\n", devProp.minor);
Log("Name: %s\n", devProp.name);
Log("Total global memory: %lu\n", devProp.totalGlobalMem);
Log("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
Log("Total registers per block: %d\n", devProp.regsPerBlock);
Log("Warp size: %d\n", devProp.warpSize);
Log("Maximum memory pitch: %lu\n", devProp.memPitch);
Log("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
Log("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
Log("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
Log("Clock rate: %d\n", devProp.clockRate);
Log("Total constant memory: %lu\n", devProp.totalConstMem);
Log("Texture alignment: %lu\n", devProp.textureAlignment);
Log("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
Log("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
Log("Number of total cores: %d\n", ConvertSMVer2Cores(devProp.major, devProp.minor)*devProp.multiProcessorCount);
Log("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>> end: tool functions of cuda >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<< start: GZEUS method definitions <<<<<<<<<<<<<<<<<<<<<<<<<<*/
void GZEUS::phantom2GPU() //copy phantom info to GPU. Make sure the phantom has loaded data
{
int NVoxel = _phant->getVoxelNum();
for (unsigned int i = 0; i < gc.size(); ++i)
{
cudaErrorCheck(hipSetDevice(gc[i].id));
//resize data in the GPU
cudaErrorCheck(hipMalloc(&gc[i].d_ph, NVoxel*sizeof(SFloat)),"resize ph");
cudaErrorCheck(hipMemcpyToSymbol(ph, &gc[i].d_ph, sizeof(SFloat *)),"copy ph pointer to GPU constant"); //set the array pointer
cudaErrorCheck(hipMalloc(&gc[i].d_doseScore, NVoxel*sizeof(SFloat)),"resize doseScore");
cudaErrorCheck(hipMemcpyToSymbol(doseScore, &gc[i].d_doseScore, sizeof(SFloat *)), "copy doseScore pointer to GPU constant"); //set the array pointer
//set the initial value of phantom and dose counter
cudaErrorCheck(hipMemcpy(gc[i].d_ph, _phant->ph.getP(), sizeof(SFloat)*NVoxel, hipMemcpyHostToDevice), "init the value of ph in GPU");
cudaErrorCheck(hipMemcpy(gc[i].d_doseScore, _phant->dose.getP(), sizeof(SFloat)*NVoxel, hipMemcpyHostToDevice), "init the value of doseScore in GPU");
//copy the rest constant
cudaErrorCheck(hipMemcpyToSymbol(NX, &_phant->NX, sizeof(int)), "copy NX to GPU");
cudaErrorCheck(hipMemcpyToSymbol(NY, &_phant->NY, sizeof(int)), "copy NY to GPU");
cudaErrorCheck(hipMemcpyToSymbol(NZ, &_phant->NZ, sizeof(int)), "copy NZ to GPU");
ZFloat temp = (ZFloat)_phant->DX;
cudaErrorCheck(hipMemcpyToSymbol(DX, &temp, sizeof(ZFloat)), "copy DX to GPU");
temp = (ZFloat)(1/_phant->DX);
cudaErrorCheck(hipMemcpyToSymbol(InvDX, &temp, sizeof(ZFloat)), "copy InvDX to GPU");
temp = (ZFloat)_phant->DY;
cudaErrorCheck(hipMemcpyToSymbol(DY, &temp, sizeof(ZFloat)), "copy DY to GPU");
temp = (ZFloat)(1 / _phant->DY);
cudaErrorCheck(hipMemcpyToSymbol(InvDY, &temp, sizeof(ZFloat)), "copy InvDY to GPU");
temp = (ZFloat)_phant->DZ;
cudaErrorCheck(hipMemcpyToSymbol(DZ, &temp, sizeof(ZFloat)), "copy DZ to GPU");
temp = (ZFloat)(1 / _phant->DZ);
cudaErrorCheck(hipMemcpyToSymbol(InvDZ, &temp, sizeof(ZFloat)), "copy InvDZ to GPU");
temp = (ZFloat)_phant->LX;
cudaErrorCheck(hipMemcpyToSymbol(LX, &temp, sizeof(ZFloat)), "copy LX to GPU");
temp = (ZFloat)_phant->LY;
cudaErrorCheck(hipMemcpyToSymbol(LY, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->LZ;
cudaErrorCheck(hipMemcpyToSymbol(LZ, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->xo;
cudaErrorCheck(hipMemcpyToSymbol(xo, &temp, sizeof(ZFloat)), "copy xo to GPU");
temp = (ZFloat)_phant->yo;
cudaErrorCheck(hipMemcpyToSymbol(yo, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->zo;
cudaErrorCheck(hipMemcpyToSymbol(zo, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->Bx;
cudaErrorCheck(hipMemcpyToSymbol(Bx, &temp, sizeof(ZFloat)), "copy Bx to GPU");
temp = (ZFloat)_phant->By;
cudaErrorCheck(hipMemcpyToSymbol(By, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->Bz;
cudaErrorCheck(hipMemcpyToSymbol(Bz, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->MaxDensity;
cudaErrorCheck(hipMemcpyToSymbol(MaxDensity, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->rf;
cudaErrorCheck(hipMemcpyToSymbol(rf, &temp, sizeof(ZFloat)));
cudaErrorCheck(hipMemcpyToSymbol(uniform, &_phant->uniform, sizeof(int)), "copy uniform to GPU");
}
}
void GZEUS::initGPU()
{
RunTimeCounter rc;
if (!initWaterCS("ZeusCrossSections.binary", gc)) exitApp("Cannot initialize the water cross sections");
int NGPU = (int)gc.size();
ConfigFile* zcf = _cf->getBlock("ZEUS");
if (zcf == NULL) zcf = _cf->getBlock("PHANTOM"); //in case cannot find the Zeus block; use default values instead
int split = 50, h_NStackDepth = 40;
zcf->getValue("NMaxSplit", split);
zcf->getValue("Particle stack depth", h_NStackDepth);
string str = "yes";
int h_fixedSplit = 1; //default yes
if (zcf->getValue("Fixed Split", str) && str.compare("yes") != 0) h_fixedSplit = 0;
int h_simuElectron = 1; //default yes
if (zcf->getValue("Simulate electron", str) && str.compare("yes") != 0) h_simuElectron = 0;
int h_forwardDetection = 1;//default yes
if (zcf->getValue("Forward detection", str) && str.compare("yes") != 0) h_forwardDetection = 0;
double fv = 0;
ZFloat h_EAbsPhoton = 50e3; //unit eV
if (zcf->getValue("EAbsPhoton", fv)) h_EAbsPhoton = (ZFloat)fv;
ZFloat h_EAbsElectron = 50e3; //unit eV
if (zcf->getValue("EAbsElectron", fv)) h_EAbsElectron = (ZFloat)fv;
ZFloat h_EMaxCSDA = 200e3; //unit eV
if (zcf->getValue("EMaxCSDA", fv)) h_EMaxCSDA = (ZFloat)fv;
for (int i = 0; i < NGPU; ++i) //for each GPU
{
cudaErrorCheck(hipSetDevice(gc[i].id));
int NGPUThread = gc[i].NBlock*gc[i].BlockSize;
//resize initial particle memory in GPU
cudaErrorCheck(hipMalloc(&gc[i].d_InitParsA, NGPUThread * gc[i].NBatch * sizeof(ParticleR)));
cudaErrorCheck(hipMemcpyToSymbol(InitParsA, &gc[i].d_InitParsA, sizeof(ParticleR *)));
cudaErrorCheck(hipMalloc(&gc[i].d_InitParsB, NGPUThread * gc[i].NBatch * sizeof(ParticleR)));
cudaErrorCheck(hipMemcpyToSymbol(InitParsB, &gc[i].d_InitParsB, sizeof(ParticleR *)));
//resize memory for the particle stack in GPU
cudaErrorCheck(hipMalloc(&gc[i].d_stackBuff, NGPUThread * h_NStackDepth* sizeof(ParticleR)));
cudaErrorCheck(hipMemcpyToSymbol(StackBuff, &gc[i].d_stackBuff, sizeof(ParticleR*)));
cudaErrorCheck(hipMemcpyToSymbol(NStackDepth, &h_NStackDepth, sizeof(int)));
//resize memory for the GRNG status in GPU
cudaErrorCheck(hipMalloc(&gc[i].d_RNGState, NGPUThread* sizeof(GRNG)));
cudaErrorCheck(hipMemcpyToSymbol(RNGState, &gc[i].d_RNGState, sizeof(GRNG*)));
cudaErrorCheck(hipMemcpyToSymbol(NBatch, &gc[i].NBatch, sizeof(int)));
cudaErrorCheck(hipMemcpyToSymbol(NMaxSplit, &split, sizeof(int)));
cudaErrorCheck(hipMemcpyToSymbol(FixedSplit, &h_fixedSplit, sizeof(int)));
cudaErrorCheck(hipMemcpyToSymbol(SIMU_ELECTRON, &h_simuElectron, sizeof(int)));
cudaErrorCheck(hipMemcpyToSymbol(ForwardDetect, &h_forwardDetection, sizeof(int)));
cudaErrorCheck(hipMemcpyToSymbol(EAbsPhoton, &h_EAbsPhoton, sizeof(ZFloat)));
cudaErrorCheck(hipMemcpyToSymbol(EAbsElectron, &h_EAbsElectron, sizeof(ZFloat)));
cudaErrorCheck(hipMemcpyToSymbol(EMaxCSDA, &h_EMaxCSDA, sizeof(ZFloat)));
}
Log("\nIt costs %f seconds to init GPU ", rc.stop());
}
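//Configuration keys consumed by initGPU() from the "ZEUS" (or fallback "PHANTOM") block, with the defaults
//hard-coded above: NMaxSplit = 50, Particle stack depth = 40, Fixed Split = yes, Simulate electron = yes,
//Forward detection = yes, EAbsPhoton = 50e3 eV, EAbsElectron = 50e3 eV, EMaxCSDA = 200e3 eV.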
void GZEUS::freeGPU()//do some clean up
{
for (unsigned int i = 0; i < gc.size(); ++i)
{
cudaErrorCheck(hipSetDevice(gc[i].id));
cudaErrorCheck(hipFree(gc[i].d_InitParsA));
cudaErrorCheck(hipFree(gc[i].d_InitParsB));
cudaErrorCheck(hipFree(gc[i].d_stackBuff));
cudaErrorCheck(hipFree(gc[i].d_RNGState));
cudaErrorCheck(hipFree(gc[i].d_ph));
cudaErrorCheck(hipFree(gc[i].d_doseScore));
gc[i].destroyStream();
}
}
void GZEUS::init(ConfigFile* cf)
{
_cf = cf;
initGPU();
ConfigFile* ph_cf = cf->getBlock("PHANTOM");
_phant = new Phantom;
_phant->loadPhantom(ph_cf);
string lastDoseFile;
if (cf->getValue("proceed last simulation", lastDoseFile) && lastDoseFile.compare("yes") == 0)
{
cf->getValue("output file name", lastDoseFile);
lastDoseFile += ".dose";
if (!_phant->previousDose(lastDoseFile.c_str())) exitApp("Cannot load last dose file to continue the simulation!");
else Log("load last dose file successfully with %.0f existing histories", _phant->getHist());
}
SourceHead_GetPrescrition(&(_phant->prescriptionDose), &(_phant->treatmentFraction));
phantom2GPU();
}
int GZEUS::getGPUConfig(ConfigFile* gcf)
{
if (NULL == gcf) exitApp("cannot find GPU configuration!");
int devCount = 0;
cudaErrorCheck(hipGetDeviceCount(&devCount));
if (devCount < 1) exitApp("Cannot find any CUDA-capable GPU on your computer!");
string GPU_Query;
gcf->getValue("GPU Query", GPU_Query);
if (GPU_Query.compare("yes") == 0)
{
cudaErrorCheck(hipGetDeviceCount(&devCount));
Log("There are %d CUDA devices listed as follow:\n", devCount);
for (int i = 0; i < devCount; ++i) printGPUProperties(i);
printf("\nDo you want to continue executing GPU computation? y/n\n");
if (getchar() != 'y') exit(0);
}
int NBlock = 128, BlockSize = 256, NBatch = 100, GRNG_Refill_Period = 70, Source_Reuse_Times = 10;
string rngStat;
gcf->getValue("GPU Block Num", NBlock);
gcf->getValue("GPU Block Dim", BlockSize);
gcf->getValue("GPU Batch Num", NBatch);
gcf->getValue("GPU RNG Statistic", rngStat);
gcf->getValue("GRNG Refill Period", GRNG_Refill_Period);
gcf->getValue("Source Reuse Times", Source_Reuse_Times);
//double GPU_Weight = 0;
GPUConfig gpuc;
//gpuc.id = GPU_Index;
gpuc.NBlock = NBlock;
gpuc.BlockSize = BlockSize;
gpuc.NBatch = NBatch;
gpuc.refillPeriod = GRNG_Refill_Period;
gpuc.SourceReuseTimes = Source_Reuse_Times;
vector<int> GPU_in_speed;
cudaGetGflopsList(GPU_in_speed);
vector<int> GPU_Index;
if (!gcf->getValue("GPU Index", GPU_Index)) //no specific GPU index
{
int NGPU = 0;
gcf->getValue("GPU Num", NGPU);
if (NGPU <= 0) exitApp("Invalid GPU index configuration!");
int NGPUAvailable = (int)GPU_in_speed.size();
for (int i = 0; i < NGPU; ++i)
{
if (i < NGPUAvailable) GPU_Index.push_back(GPU_in_speed[i]);
else break;
}
}
for (unsigned int i = 0; i < GPU_Index.size(); ++i)
{
if (GPU_Index[i] >= 0 && GPU_Index[i] < devCount)
{
gpuc.id = GPU_Index[i];
gc.push_back(gpuc);
}
else exitApp("Invalid GPU index");
}
//find the best GPU as the main thread, and optimize the work load
int main_id = 0;
if (gc.size() > 1) main_id = cudaGetMaxGflopsDeviceID(gc);
Log("/******************* The following GPU will be used ***************************/");
for (unsigned int i = 0; i < gc.size(); ++i) printGPUProperties(gc[i].id);
Log("/************************ End GPU description *********************************/\n\n");
//create streams of GPU control
for (unsigned int i = 0; i < gc.size(); ++i) gc[i].createStream();
return main_id;
}
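//Configuration keys consumed by getGPUConfig() from the "GPU" block, with the defaults hard-coded above:
//GPU Query, GPU Block Num = 128, GPU Block Dim = 256, GPU Batch Num = 100, GRNG Refill Period = 70,
//Source Reuse Times = 10, and either an explicit "GPU Index" list or "GPU Num" to auto-pick the fastest devices.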
/*>>>>>>>>>>>>>>>>>>>>>>>>>> end: GZEUS method definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
RunTimeCounter sourceCounter;
void getSource(SourcePool* sp, volatile int* hist)
{
sourceCounter.start();
*hist = sp->prepareCopy();//prepare one batch before any run
Log("time cost to generate particle = %f s", sourceCounter.stop());
}
int NThread, NProcess, pID;
bool b_peek = false;
BinaryFile gBF; //to store the raw dose data
ProgressCallBack progressCB = NULL;
PeekDoseCallBack peekDoseCB = NULL;
JobFinishedCallBack jobFinishedCB = NULL;
LogCallBack exitCB = NULL;
// Begin: essential for command line call
bool b_thread_active = false;
bool b_abort = false;
#ifdef WIN32
#define DEXPORT __declspec(dllexport)
#else
#define DEXPORT __attribute__ ((visibility ("default")))
#endif
extern "C" DEXPORT void startSimulation(const char* configFileName, MPS& configMacro, bool bWait) //launch a thread to do the simulation
{
b_thread_active = true;
std::thread thd(executeJob, configFileName, 1, std::ref(configMacro));
if (bWait) thd.join();
else thd.detach();
}
extern "C" DEXPORT void stopSimulation()
{
if (b_thread_active) b_abort = true; //only if the thread is on
}
// End: essential for command line call
extern "C" DEXPORT void peekDose() //mark to generate intermediate dose
{
if (b_thread_active) b_peek = true; //only if the thread is on
}
//interfaces to register the call-back function
extern "C" DEXPORT void setProgressCallBack(ProgressCallBack pcb){ progressCB = pcb; }
extern "C" DEXPORT void setPeekDoseCallBack(PeekDoseCallBack pcb){ peekDoseCB = pcb; }
extern "C" DEXPORT void setJobFinishedCallBack(JobFinishedCallBack jcb){ jobFinishedCB = jcb; }
extern "C" DEXPORT void setLogCallBack(LogCallBack lcb){ Log.setCallBack(lcb); }
extern "C" DEXPORT void setExitCallBack(LogCallBack ecb){ exitCB = ecb; }
void exitApp(const char *inf)
{
Log("fatal error: %s", inf);
Log.flush(); //flush the log file's buffer
if (exitCB) exitCB(inf); // let the call back function display the error
else
{
Log("\nPress enter key to exit...");
getchar();
}
exit(-1);
}
void executeJob(const char* configFileName, double processWeight, MPS& configMacro) //execute one job according to the config file
{
RunTimeCounter totalTime;
ConfigFile cf(configFileName); //parse the total config file
//find out where the config file is located
string cdir(configFileName);
size_t pos = cdir.find_last_of("\\/");
cdir.erase(++pos, string::npos);
cf.macroReplace(string("$cdir$"), cdir);
cf.macroReplace(configMacro);
string logDir, logAppend, logDescription;
cf.getValue("log file directory", logDir);
cf.getValue("log file append", logAppend);
cf.getValue("log description", logDescription);
if (0 == logAppend.compare("no")) logAppend = "w";
else logAppend = "a";
Log.setLogName(pID, logAppend, logDir);//start log recording for this job
Log("The job config file name = %s", configFileName);
if (logDescription.compare("NA") != 0) Log("Short description: %s", logDescription.c_str());
Log("Start log time = %s\n\n", Log.timeNow());
double fNSIMU = 1e7;
cf.getValue("NSIMU", fNSIMU);
fNSIMU *= processWeight; //real workload on this node
double targetErr = -1;
cf.getValue("target uncertainty", targetErr);
double targetErr2 = targetErr*targetErr;
double thresholdRegion = 0.5; //default 50% of the max dose as threshold for uncertainty calculation
cf.getValue("threshold region", thresholdRegion);
string outname;
cf.getValue("output file name", outname);
ConfigFile* zcf = cf.getBlock("ZEUS");
if (zcf == NULL) zcf = cf.getBlock("PHANTOM"); //in case cannot find the Zeus block; use default values instead
int nsplit = 50;
zcf->getValue("NMaxSplit", nsplit);
bool simuElectron = true;
string str;
if (zcf->getValue("Simulate electron", str) && str.compare("yes") != 0) simuElectron = false;
//search and config the GPU part ->> get gc
GZEUS zeus;
vector<GPUConfig>& gc = zeus.gc;//makes the name shorter
ConfigFile *gcf = cf.getBlock("GPU");
int main_id = zeus.getGPUConfig(gcf);
//initialize GZEUS and SourceHead by configurations
ConfigFile *scf = cf.getBlock("SOURCEHEAD");
SourceHead_Init(scf);
#ifdef SOURCE_STATISTICS
//for the source energy statistics
PRNG _rng;
_rng.init(1234);
Particle pars[100];
int NES = int(1e6);
double* Ens = new double[NES];
int nsam = 0;
while (nsam < NES)
{
int np = SourceHead_Sample(&_rng, pars);
for (int i = 0; i < np; ++i)
{
Ens[nsam] = pars[i].E;
++nsam;
if (nsam >= NES) break;
}
}
FILE* fps = fopen("E.txt", "wb");
fwrite(Ens, sizeof(double), NES, fps); //dump all NES sampled energies
fclose(fps);
delete[] Ens;
#endif
//string dataDir;
//scf->getValue("DataDir",dataDir);
//if (!ZeusData_load(dataDir.c_str())) exitApp("Cannot load Zeus cross-sections correctly!");
zeus.init(&cf); //prepare GPU data and phantom
fNSIMU -= zeus._phant->Hist; //subtract previous histories
if (fNSIMU <= 0)
{
Log("Don't need to run any more history! Skip this task...\n\n");
return;
}
//config the source particle pool
int NGT = 1, NGStack = 400;
scf->getValue("NThread", NGT);
scf->getValue("Sample Stack Depth", NGStack);
int NOneFetch = gc[0].getNOneBatch();
SourcePool sp(&gc, NGT, NOneFetch, zeus._phant, NGStack);
Log("\nCalculating dose, please wait patiently...\n\n");
cf.getValue("log short mode", logDescription);
if (logDescription.compare("yes") == 0) Log.shortMode(true);
RunTimeCounter rc; //count the calculating time
//note history number != particle number in the source pool
const int NGPU = (int)gc.size();
//history number generated once by the source pool, only modified by one thread,
//but accessed by multiple threads. It can be read only after the generating thread has finished.
volatile int histNew = 0; //histNew isn't always the same as histAdd because histNew is modified in an isolated thread
volatile int histAdd = 0; //this variable is shared by all threads, so add the keyword "volatile" to be safe
const int firstSeed = zeus._phant->seedBegin( NGPU *gc[0].NBlock*gc[0].BlockSize);// note it's different from gPENELOPE
std::thread sthread; //source generating thread, unattached
RunTimeCounter kernelCounter;
RunTimeCounter copyCounter;
sthread = std::thread(&getSource, &sp, &histNew);
vector<SFloat*> dose(NGPU);//to store dose from all GPU cards
vector<SFloat*> uncertainty(NGPU);
int NVoxel = zeus._phant->getVoxelNum();
int nBatch = 0; //count the batch number that has been done
const int NBlock = gc[0].NBlock;
const int BlockSize = gc[0].BlockSize;
// module of auto reuse source particles
volatile int Source_Reuse_Times = gc[0].SourceReuseTimes; //may be modified by one thread, so use volatile to be safe
bool autoSourceReuse = false; //used for auto reuse
if (Source_Reuse_Times < 1)
{
autoSourceReuse = true;
Source_Reuse_Times = 1;
}
bool b_targetErrReached = false;
//each thread takes care of one GPU
#ifdef USE_OPENMP
#pragma omp parallel num_threads(NGPU)
#endif
{
int it = omp_get_thread_num();
cudaErrorCheck(hipSetDevice(gc[it].id)); //set which GPU this thread will operate on
#ifdef USE_SINGLE_PRECISION
WaterQS aWaterQS((float*)h_WaterQSData, NQSURFACE_Q, NQSURFACE_E);
#endif
cudaErrorCheck(hipDeviceSetCacheConfig(hipFuncCachePreferL1));
double fHMax = fNSIMU / NGPU;
double hist = 0;
//std::mt19937 mtrng((unsigned)std::chrono::system_clock::now().time_since_epoch().count());
//generate a random thread for each working thread
//int seed = int((it + 1) * 12345789 * (double(mtrng()) / mtrng.max() + 0.5)); //???
SFloat* gdose = new SFloat[NVoxel]; //to fetch temporary dose from GPU
memset(gdose, 0, sizeof(SFloat)*NVoxel);
// resize memory for CPU end storage
dose[it] = new SFloat[NVoxel]; //to store the final dose of this thread
uncertainty[it] = new SFloat[NVoxel]; //to store the uncertainty
// initialize the dose score
for (int i = 0; i < NVoxel; ++i)
{
dose[it][i] = zeus._phant->dose[i] / NGPU;
uncertainty[it][i] = zeus._phant->uncertainty[i] / NGPU;
}
int seed = firstSeed + it*NBlock*BlockSize; //make sure all seeds are unique; note it's different from gPENELOPE
initThreads<<<NBlock, BlockSize>>>(seed);
cudaKernelCheck(0);
int source_reuse = Source_Reuse_Times; //counts how many times the source has been reused; start at the limit to force generating incident particles first
ParticleR* pInit = NULL;
while(true) //calculating main loop, end when hist >= fHMax
{
//need to regenerate initial particles
if (source_reuse >= Source_Reuse_Times)
{
if (it == main_id) //it's the main thread
{
//wait until the source is ready
sthread.join();
histAdd = histNew; //means the main GPU has histAdd new histories
}
#pragma omp barrier //wait until all GPU threads arrive here
pInit = sp.getAP(it); //update the particle array pointer
source_reuse = 0; //reset the reuse counter
#pragma omp barrier //wait until all GPUs received the data
//if (it == main_id) sthread = std::thread(&getSource, &sp, &histNew);
if (it == main_id) sthread = std::thread(&getSource, &sp, &histNew); //start a fetch prepare
}
++source_reuse; //count how many times the source has been used
hist += histAdd; // histAdd more histories will be simulated
/****************** Begin a batch run on GPU ********************/
if (it == main_id) kernelCounter.start(); //only count the kernel time for the main GPU
int sharedSize = 0; // sizeof(float)*BlockSize * 16;
if (simuElectron)
{
if (nsplit > 1) gZeusSmartRun<<<NBlock, BlockSize, sharedSize, gc[it].kernelstream>>>(pInit);
else gZeusRun<<<NBlock, BlockSize, sharedSize, gc[it].kernelstream>>>(pInit);
}
else
{
if (nsplit > 1) gZeusSmartComptonRun<<<NBlock, BlockSize, sharedSize, gc[it].kernelstream>>>(pInit);
else gZeusComptonRun<<<NBlock, BlockSize, sharedSize, gc[it].kernelstream>>>(pInit);
}
hipStreamSynchronize(gc[it].kernelstream);//wait for the kernel to finish
//print the speed information
if (it == main_id)
{
Log("time cost to execute kernels = %f s", kernelCounter.stop());
if (targetErr <= 0)
{
double time = rc.stop(true);
double speed = hist / time;
double rest = 0;
if (fHMax > hist) rest = (fHMax - hist) / speed;
else rest = 0;
Log("GPU processed ------------------------ %3.1f%%, speed = %d h/s\n", hist*100.0 / fHMax, int(speed));
Log("Time escaped = %.1f min, left time expected = %.1f min", time / 60.0, rest / 60.0);
if (progressCB) progressCB(hist*100.0 / fHMax, speed, time, rest, 0);
}
++nBatch;
if (nBatch == 10 && autoSourceReuse) // may change Source_Reuse_Times based on the performance of the first 10 batches
{
int rt = (int)round(sourceCounter.getStoredTime() / kernelCounter.getStoredTime());
if (rt > 1) Source_Reuse_Times = rt;
}
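//rationale: if generating one source batch takes roughly rt times longer than running one kernel batch,
//reuse each generated batch rt times so the GPU is not starved by the CPU-side source thread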
}
/****************** End a batch run on GPU *********************/
//after one batch, we need to fetch dose from GPU to calculate the uncertainty
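//dose[it][i] accumulates the per-batch dose and uncertainty[it][i] its square, so the usual batch-statistics
//estimate (presumably what peekUncertainty/addDose compute) can be formed later, roughly
//  s^2 = (<x^2> - <x>^2) / (nBatch - 1) relative to the mean batch dose in the threshold region.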
cudaErrorCheck(hipMemcpy(gdose, gc[it].d_doseScore, sizeof(SFloat)*NVoxel, hipMemcpyDeviceToHost)); //fetch the batch dose from GPU
SFloat minv = gdose[0];
SFloat maxv = minv;
for (int i = 0; i < NVoxel; ++i)
{
minv = min(minv, gdose[i]);
maxv = max(maxv, gdose[i]);
dose[it][i] += gdose[i];
uncertainty[it][i] += gdose[i] * gdose[i];
gdose[i] = 0;
}
if (it == main_id) Log("max dose = %g, min dose = %g", maxv, minv);
cudaErrorCheck(hipMemcpy(gc[it].d_doseScore, gdose, sizeof(SFloat)*NVoxel, hipMemcpyHostToDevice)); //reset the dose counter in GPU
if (it == main_id && b_peek)
{
SFloat* d = new SFloat[NVoxel];
SFloat* u = new SFloat[NVoxel];
//add up dose and uncertainty in different GPU card
for (int j = 0; j < NVoxel; ++j)
{
d[j] = u[j] = 0;
for (int i = 0; i < NGPU; ++i)
{
d[j] += dose[i][j];
u[j] += uncertainty[i][j];
}
}
//Log("\nOutputing the intermediate dose:");
Phantom tempPhant(*zeus._phant);
double norm = 1.0889e15 * SourceHead_BeamOnTime();
tempPhant.addDose(d, u, NGPU * nBatch, NGPU * hist, norm, thresholdRegion);
delete[] d;
delete[] u;
tempPhant.getBinaryFile(gBF);
b_peek = false;
if (peekDoseCB) peekDoseCB(gBF);
}
if (targetErr > 0) //calculate the uncertainty
{
if (it == main_id)
{
double err2 = zeus._phant->peekUncertainty(dose[it], uncertainty[it], nBatch + zeus._phant->nBatch / NGPU, thresholdRegion);
double histEstimate = hist*err2 / (targetErr2*NGPU);
double time = rc.stop(true);
double speed = hist / time;
double rest = (histEstimate - hist) / speed;
Log("GPU processed ------------------------ %3.1f%%, speed = %d h/s\n", hist*100.0 / histEstimate, int(speed));
Log("Time escaped = %.1f min, left time expected = %.1f min", time / 60.0, rest / 60.0);
if (err2 < targetErr2*NGPU) b_targetErrReached = true;
if (progressCB) progressCB(hist*100.0 / histEstimate, speed, time, rest, sqrt(err2 / NGPU)*100.0);
}
#pragma omp barrier //make sure all work threads got the break signal
if (b_targetErrReached) break; //all work threads will break the while loop
}
if ((targetErr <= 0 && hist >= fHMax) || b_abort) break;
}
//finish in this GPU thread
gc[it].hist = hist;
if (!b_abort && it == main_id) //if it's aborted, print nothing
{
Log("GPU processed ------------------------ 100%%, speed = %d h/s\n", int(hist / rc.stop(true)));
Log.shortMode(false);
Log("\nWait all GPUs to finish their job...\n");
}
delete[] gdose;
} //end openMP
Log("All GPUs have finished their simulation job! Collecting dose...\n\n");
double totHist = NGPU*gc[0].hist;
SFloat* d = new SFloat[NVoxel];
SFloat* u = new SFloat[NVoxel];
//add up dose and uncertainty in different GPU card
for (int j = 0; j < NVoxel; ++j)
{
d[j] = u[j] = 0;
for (int i = 0; i < NGPU; ++i)
{
d[j] += dose[i][j];
u[j] += uncertainty[i][j];
}
}
for (int i = 0; i < NGPU; ++i)
{
delete[] dose[i];
delete[] uncertainty[i];
}
Log.shortMode(false);
double norm = 1.0889e15 * SourceHead_BeamOnTime();
zeus._phant->addDose(d, u, NGPU * nBatch, totHist, norm, thresholdRegion);
zeus._phant->getBinaryFile(gBF);
delete[] d;
delete[] u;
if (b_abort) outname += "_abort";// make sure it wouldn't overwrite the last file
outname += ".dose";
string vrFormat;
cf.getValue("ViewRay format", vrFormat);
if (vrFormat.compare("yes") == 0) zeus._phant->output(outname.c_str(), 1);
else zeus._phant->output(outname.c_str());
Log("Wait for the source generating thread to finish...");
sthread.join();
SourceHead_Delete(); //release the source head resource safely
Log("Source reuse times = %d", Source_Reuse_Times);
Log("\nTime statistics for main GPU:");
//Log("Total copy time =%.2f s", copyCounter.getStoredTime());
Log("Total kernel time = %.2f s", kernelCounter.getStoredTime());
Log("Total SourceHead time = %.2f s\n\n", sourceCounter.getStoredTime());
Log("Mixed running time = %.2f minutes, total history number = %g", rc.stop(true) / 60.0, totHist);
Log("The overall simulating speed = %d hist/sec\n\n", int(totHist / rc.stop(true)));
Log("End log time = %s\n\n", Log.timeNow());
Log("/##############################################################################/\n\n");
Log.closeFile();
if (jobFinishedCB) jobFinishedCB(b_abort, gBF);
if (b_abort)
{
b_abort = false;
if (jobFinishedCB == NULL) exit(0); //in command line execution mode, exit the program directly
}
b_thread_active = false;
} //end executeJob
/*===================================== gZeus.cu =====================================*/
#include "gZeus.h"
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: variables in device memory <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
//extern __shared__ int sharedMem[];
__constant__ int NBatch;
__constant__ int NStackDepth;
__constant__ int FixedSplit = 1;
__constant__ int ForwardDetect = 1; //whether to advance the photon forward when guiding it into the phantom
__constant__ int NMaxSplit = 50;
__constant__ int SIMU_ELECTRON = 0;
__constant__ ZFloat EAbsPhoton = GlueF(50e3);
__constant__ ZFloat EAbsElectron = GlueF(50e3);
__constant__ ZFloat ERangeCut = GlueF(10e3);
__constant__ ZFloat EMaxCSDA = GlueF(200e3);
#include "WaterCS.h"
//}}
//{{ pointers to accelerating access in the device
__constant__ ParticleR* InitPars;
__constant__ ParticleR* InitParsA;
__constant__ ParticleR* InitParsB;
__constant__ ParticleR* StackBuff;//memory pointer for stacks
__constant__ GRNG* RNGState;
//}}
//{{ data for phantom
__constant__ int NX, NY, NZ; //voxel number
__constant__ ZFloat DX, DY, DZ; // voxel size, unit cm
__constant__ ZFloat InvDX, InvDY, InvDZ;
__constant__ ZFloat LX, LY, LZ; // side length Lx=DX*NX
__constant__ ZFloat xo, yo, zo;
__constant__ ZFloat MaxDensity;
__constant__ ZFloat Bx, By, Bz; //unit magnetic field direction
__constant__ ZFloat rf;
__constant__ int uniform;
__constant__ SFloat* doseScore; //pointer to dose counter
__constant__ SFloat* ph; //pointer to phantom
Texture1Ddata(SFloat, Phant)
//}}
/*>>>>>>>>>>>>>>>>>>>>>>>>> end: variables in device memory >>>>>>>>>>>>>>>>>>>>>>>>>>>*/
#if __CUDACC_VER_MAJOR__ < 8
//just provide double float version of atomicAdd in case we need it.
__device__ __forceinline__ double atomicAdd(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
// Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN)
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
__device__ void exitKernel(const char inf[])
{
printf("error: %s\n\n", inf);
asm("trap;");
}
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: phantom method definitions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
__device__ ZFloat getDensity(int iabsv)
{
if (uniform) return MaxDensity;
else return ph[iabsv];
//else return getPhant(iabsv);
}
__device__ __forceinline__ ZFloat getDensity(ParticleR& p)
{
return getDensity(p.iabsv);
}
__device__ __forceinline__ void deposit(int iabsv, ZFloat DE)
{
atomicAdd(doseScore + iabsv, DE);
}
__device__ __forceinline__ bool lineInPhantom(ParticleR& p)
{
//first translate the coordinate system
p.x -= xo;
p.y -= yo;
p.z -= zo;
//assuming the incident particle go straight line, judge if it will enter the phantom.
//if true, give the interaction position
const ZFloat Delta = GlueF(1e-5);
if (p.x < 0 || p.x >= LX || p.y < 0 || p.y >= LY || p.z < 0 || p.z >= LZ) //initial position lays outside the phantom
{
if (p.x < 0)
{
//now it's outside the phantom
if (p.u > 0)
{
ZFloat t = p.x / p.u;
p.x = 0;
p.y -= t*p.v;
p.z -= t*p.w;
if (0 <= p.y &&p.y < LY && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
else if (p.x >= LX)
{
if (p.u < 0)
{
ZFloat t = (LX - Delta - p.x) / p.u;
p.x = LX - Delta;
p.y += t*p.v;
p.z += t*p.w;
if (0 <= p.y &&p.y < LY && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
if (p.y < 0)
{
//now it's outside the phantom
if (p.v > 0)
{
ZFloat t = p.y / p.v;
p.y = 0;
p.x -= p.u*t;
p.z -= p.w*t;
if (0 <= p.x &&p.x < LX && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
else if (p.y >= LY)
{
if (p.v < 0)
{
ZFloat t = (LY - Delta - p.y) / p.v;
p.y = LY - Delta;
p.x += t*p.u;
p.z += t*p.w;
if (0 <= p.x &&p.x < LX && 0 <= p.z&&p.z < LZ) return true;
}
else return false;
}
if (p.z < 0)
{
//now it's outside the phantom
if (p.w > 0)
{
ZFloat t = p.z / p.w;
p.z = 0;
p.y -= t*p.v;
p.x -= t*p.u;
if (0 <= p.y &&p.y < LY && 0 <= p.x&&p.x < LX) return true;
}
else return false;
}
else if (p.z >= LZ)
{
if (p.w < 0)
{
ZFloat t = (LZ - Delta - p.z) / p.w;
p.z = LZ - Delta;
p.y += t*p.v;
p.x += t*p.u;
if (0 <= p.y &&p.y < LY && 0 <= p.x&&p.x < LX) return true;
}
else return false;
}
}
else return true;
return false;
}
__device__ bool lineIn(ParticleR& p)
{
//if (!lineInPhantom(p)) return false;
if(ForwardDetect) //forward detect the voxel density to skip the air. Is it necessary?
{
const ZFloat step = min(DX, min(DY, DZ));
const int preNum = 1; //how many step it will forwardly detect
const ZFloat dmx = step*p.u;
const ZFloat dmy = step*p.v;
const ZFloat dmz = step*p.w;
while (true)
{
int ix = int((p.x + preNum*dmx) *InvDX);
int iy = int((p.y + preNum*dmy) *InvDY);
int iz = int((p.z + preNum*dmz) *InvDZ);
if (ix < 0 || ix >= NX || iy < 0 || iy >= NY || iz < 0 || iz >= NZ) return false;//it will leave the phantom without scattering
if (getDensity(at(ix, iy, iz)) > GlueF(0.04)) break; //stop when it gets close to the target
//advance the particle
p.x += dmx;
p.y += dmy;
p.z += dmz;
}
}
//prepare the voxel index
p.ivx = int(p.x*InvDX);
p.ivy = int(p.y*InvDY);
p.ivz = int(p.z*InvDZ);
//p.iabsv = at(p.ivx, p.ivy, p.ivz); //this will be recalculated anyway
p.x -= p.ivx*DX;
p.y -= p.ivy*DY;
p.z -= p.ivz*DZ;
return true; //now it's ready to transport
}
__device__ ZFloat tperp(ParticleR& p)
{
ZFloat tx = DX - p.x;
if (p.x < tx) tx = p.x;
ZFloat ty = DY - p.y;
if (p.y < ty) ty = p.y;
ZFloat tz = DZ - p.z;
if (p.z < tz) tz = p.z;
return tx < ty && tx < tz ? tx : ty < tz ? ty : tz; //min of (tx, ty, tz)
}
__device__ bool photonFlight(ParticleR & p, ZFloat ds) //return whether particle leaves the phantom
{
p.x += ds*p.u + p.ivx*DX;
p.y += ds*p.v + p.ivy*DY;
p.z += ds*p.w + p.ivz*DZ;
if (p.x < 0 || p.x >= LX || p.y < 0 || p.y >= LY || p.z < 0 || p.z >= LZ) return true;
//calculate the voxel index
p.ivx = int(p.x*InvDX);
p.ivy = int(p.y*InvDY);
p.ivz = int(p.z*InvDZ);
p.x -= DX*p.ivx;
p.y -= DY*p.ivy;
p.z -= DZ*p.ivz;
p.iabsv = at(p.ivx, p.ivy, p.ivz);
return false;
}
// __device__ __forceinline__ bool intersect(ParticleR &p, ZFloat &step, short& idex, short& dvox)
// {
// bool b_intersect = false;
// if (p.v > 0)
// {
// ZFloat next = DY - p.y;
// if (p.v*step > next)
// {
// step = next / p.v;
// idex = 2;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.v < 0)
// {
// ZFloat next = -p.y;
// if (p.v*step < next)
// {
// step = next / p.v;
// idex = 2;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// if (p.w > 0)
// {
// ZFloat next = DZ - p.z;
// if (p.w*step > next)
// {
// step = next / p.w;
// idex = 3;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.w < 0)
// {
// ZFloat next = -p.z;
// if (p.w*step < next)
// {
// step = next / p.w;
// idex = 3;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// if (p.u > 0)
// {
// ZFloat next = DX - p.x;
// if (p.u*step > next)
// {
// step = next / p.u;
// idex = 1;
// dvox = 1;
// b_intersect = true;
// }
// }
// else if (p.u < 0)
// {
// ZFloat next = -p.x;
// if (p.u*step < next)
// {
// step = next / p.u;
// idex = 1;
// dvox = -1;
// b_intersect = true;
// }
// }
//
// return b_intersect;
// }
//
// __device__ __forceinline__ bool intersect(ParticleR &p, ZFloat &step)
// {
// bool b_intersect = false;
// if (p.v > 0)
// {
// ZFloat next = DY - p.y;
// if (p.v*step > next)
// {
// step = next / p.v;
// b_intersect = true;
// }
// }
// else if (p.v < 0)
// {
// ZFloat next = -p.y;
// if (p.v*step < next)
// {
// step = next / p.v;
// b_intersect = true;
// }
// }
//
// if (p.w > 0)
// {
// ZFloat next = DZ - p.z;
// if (p.w*step > next)
// {
// step = next / p.w;
// b_intersect = true;
// }
// }
// else if (p.w < 0)
// {
// ZFloat next = -p.z;
// if (p.w*step < next)
// {
// step = next / p.w;
// b_intersect = true;
// }
// }
//
// if (p.u > 0)
// {
// ZFloat next = DX - p.x;
// if (p.u*step > next)
// {
// step = next / p.u;
// b_intersect = true;
// }
// }
// else if (p.u < 0)
// {
// ZFloat next = -p.x;
// if (p.u*step < next)
// {
// step = next / p.u;
// b_intersect = true;
// }
// }
//
// return b_intersect;
// }
__device__ __forceinline__ bool chvox(ParticleR &p, ZFloat &step, short& idex, short& dvox)
{
switch (idex)
{
case 3:
if (dvox > 0)
{
++p.ivz;
if (p.ivz >= NZ) return false;
p.z = 0;
}
else
{
--p.ivz;
if (p.ivz < 0) return false;
p.z = DZ;
}
p.x += p.u*step;
p.y += p.v*step;
break;
case 2:
if (dvox > 0)
{
++p.ivy;
if (p.ivy >= NY) return false;
p.y = 0;
}
else
{
--p.ivy;
if (p.ivy < 0) return false;
p.y = DY;
}
p.x += p.u*step;
p.z += p.w*step;
break;
default:
if (dvox > 0)
{
++p.ivx;
if (p.ivx >= NX) return false;
p.x = 0;
}
else
{
--p.ivx;
if (p.ivx < 0) return false;
p.x = DX;
}
p.y += p.v*step;
p.z += p.w*step;
}
p.iabsv = at(p.ivx, p.ivy, p.ivz);
return true;
}
__device__ int electronFreeFlight(ParticleR& p, ZFloat Eend)
{
ZFloat voxden = getDensity(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
ZFloat finalRange = Eend > ERangeCut ? (Eend - ERangeCut)*WaterRangeCS(Eend) : 0;
while (true)
{
ZFloat step = (range - finalRange) / voxden;
//check if it intersect with the boundary of current voxel
short idex, dvox; //idex = 1,2,3 means x,y,z direction; dvox = +1, -1 means moving in positive or negative direction
bool intersect = false;
if (p.v > 0)
{
ZFloat next = DY - p.y;
if (p.v*step > next)
{
step = next / p.v; idex = 2; dvox = 1; intersect = true;
}
}
else if (p.v < 0)
{
ZFloat next = -p.y;
if (p.v*step < next)
{
step = next / p.v; idex = 2; dvox = -1; intersect = true;
}
}
if (p.w > 0)
{
ZFloat next = DZ - p.z;
if (p.w*step > next)
{
step = next / p.w; idex = 3; dvox = 1; intersect = true;
}
}
else if (p.w < 0)
{
ZFloat next = -p.z;
if (p.w*step < next)
{
step = next / p.w; idex = 3; dvox = -1; intersect = true;
}
}
if (p.u > 0)
{
ZFloat next = DX - p.x;
if (p.u*step > next)
{
step = next / p.u; idex = 1; dvox = 1; intersect = true;
}
}
else if (p.u < 0)
{
ZFloat next = -p.x;
if (p.u*step < next)
{
step = next / p.u; idex = 1; dvox = -1; intersect = true;
}
}
ZFloat newEnergy = Eend;
if (intersect)
{
range -= step*voxden;
newEnergy = WaterInverseRangeCS(range);
if (newEnergy < ERangeCut) newEnergy = 0;
}
deposit(p.iabsv, (p.E - newEnergy)*p.weight);
//update the particle energy to the new value
p.E = newEnergy;
if (p.E < ERangeCut) return 1; // if this is the final step to local absorption, we don't need to update the position and the direction
//move the electron
p.x += p.u*step;
p.y += p.v*step;
p.z += p.w*step;
if (!intersect) break;
if (2 == idex) //y direction
{
p.ivy += dvox;
if (dvox > 0)
{
if (p.ivy >= NY) return 1;
p.y = 0;
}
else
{
if (p.ivy < 0) return 1;
p.y = DY;
}
}
else if (3 == idex) //z direction
{
p.ivz += dvox;
if (dvox > 0)
{
if (p.ivz >= NZ) return 1;
p.z = 0;
}
else
{
if (p.ivz < 0) return 1;
p.z = DZ;
}
}
else //x direction
{
p.ivx += dvox;
if (dvox > 0)
{
if (p.ivx >= NX) return 1;
p.x = 0;
}
else
{
if (p.ivx < 0) return 1;
p.x = DX;
}
}
p.iabsv = at(p.ivx, p.ivy, p.ivz);
voxden = getDensity(p);
}
return 0;
}
__device__ int electronFlight(ParticleR& p, ZFloat Eend)
{
if (rf == 0) return electronFreeFlight(p, Eend);
//move for electron/photon. Note that coordinates are relative to each voxel
const ZFloat deltax = GlueF(0.01);
ZFloat voxden = getDensity(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
ZFloat finalRange = Eend > ERangeCut ? (Eend - ERangeCut)*WaterRangeCS(Eend) : 0;
ZFloat e = GlueF(0.5)*(p.E + Eend);
if (e < EAbsElectron) e = EAbsElectron; // limit to Eabs
//ZFloat uwx = Bx;
//ZFloat uwy = By;
//ZFloat uwz = Bz;
ZFloat Rb = rf*GlueF(sqrt)(e*(e + TEs));
ZFloat Rbi = 1 / Rb; //in case this value being used many times
ZFloat maxStep = GlueF(sqrt)(2 * Rb*deltax); //max allowed distance to move to ensure accuracy
while (true)
{
ZFloat step = (range - finalRange) / voxden;
bool finalStep = true;
if (step > maxStep)
{
step = maxStep;
finalStep = false;
}
//check if it intersect with the boundary of current voxel
int idex, dvox; //idex = 1,2,3 means x,y,z direction; dvox = +1, -1 means moving in positive or negative direction
bool intersect = false;
if (p.v > 0)
{
ZFloat next = DY - p.y;
if (p.v*step > next)
{
step = next / p.v; idex = 2; dvox = 1; intersect = true;
}
}
else if (p.v < 0)
{
ZFloat next = -p.y;
if (p.v*step < next)
{
step = next / p.v; idex = 2; dvox = -1; intersect = true;
}
}
if (p.w > 0)
{
ZFloat next = DZ - p.z;
if (p.w*step > next)
{
step = next / p.w; idex = 3; dvox = 1; intersect = true;
}
}
else if (p.w < 0)
{
ZFloat next = -p.z;
if (p.w*step < next)
{
step = next / p.w; idex = 3; dvox = -1; intersect = true;
}
}
if (p.u > 0)
{
ZFloat next = DX - p.x;
if (p.u*step > next)
{
step = next / p.u; idex = 1; dvox = 1; intersect = true;
}
}
else if (p.u < 0)
{
ZFloat next = -p.x;
if (p.u*step < next)
{
step = next / p.u; idex = 1; dvox = -1; intersect = true;
}
}
if (intersect) finalStep = false;
ZFloat newEnergy = Eend;
if (!finalStep)
{
range -= step*voxden;
newEnergy = WaterInverseRangeCS(range);
if (newEnergy < ERangeCut) newEnergy = 0;
}
deposit(p.iabsv, (p.E - newEnergy)*p.weight);
//update the particle energy to the new value
p.E = newEnergy;
if (p.E < ERangeCut) return 1; // if this is the final step to local absorption, we don't need to update the position and the direction
//move the electron/positron
p.x += p.u*step;
p.y += p.v*step;
p.z += p.w*step;
ZFloat vuw = p.u*Bx + p.v*By + p.w*Bz;
ZFloat vperpx = p.u - vuw * Bx,
vperpy = p.v - vuw * By,
vperpz = p.w - vuw * Bz;
ZFloat vxwx = vperpy*Bz - vperpz*By,
vxwy = vperpz*Bx - vperpx*Bz,
vxwz = vperpx*By - vperpy*Bx;
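// Deflection in the uniform magnetic field (Bx,By,Bz is a unit vector): the velocity is split into the
// component along B (vuw) and the perpendicular part vperp, with vxw = vperp x B. Rotating vperp about B
// by the angle arg = step/Rb (Rb acting as the gyration radius computed above) gives
// v' = v_par + vperp*cos(arg) - vxw*sin(arg), i.e. dv = f1*vperp - f2*vxw with f1 = cos(arg)-1, f2 = sin(arg).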
// The step-length dependent variables f1 & f2
ZFloat f1, f2;
ZFloat arg = step * Rbi;
// if (arg < GlueF(0.2))
// {
// // arg is small, so use power series expansion of sine and cosine
// ZFloat arg2 = arg*arg;
// f1 = -GlueF(0.5)*arg2 + GlueF(0.0416666667)*arg2*arg2; // for 0.2, relative error is 2.2e-6
// f2 = arg - GlueF(0.16666667)*arg*arg2; // for 0.2, relative error is 1.3e-5, absolute error is 2.6e-6
// }
// else
/* {*/
f1 = GlueF(cos)(arg)-1;
f2 = GlueF(sin)(arg);
/* }*/
// Direction change
ZFloat dvx = f1*vperpx - f2*vxwx; // would simplify to f1*_v.x - f2*_v.y;
ZFloat dvy = f1*vperpy - f2*vxwy; // would simplify to f1*_v.y + f2*_v.x;
ZFloat dvz = f1*vperpz - f2*vxwz; // would simplify to 0 (i.e., component along the magnetic field remains constant).
//update the direction
p.u += dvx;
p.v += dvy;
p.w += dvz;
if (finalStep)
{
ZFloat tp = tperp(p);
ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
if (range < tp*voxden) // can deposit without further simulation
{
deposit(p.iabsv, p.E*p.weight);
return 1; //end the simulation of this electron
}
return 0;
}
//not the final step, we may need to check the direction
if (intersect)
{
//
// We are entering a new voxel. But because we are also changing the direction, we need to verify that the direction has not changed
// in a way that we are actually coming back into the voxel from which we came. The condition for coming back into the same voxel
// is that v*(v + dv) < 0, where v and dv are the direction and the direction change of the component crossing the voxel boundary
//
switch (idex)
{
case 3:
if (p.w*(p.w - dvz) >= 0) //enter a new voxel
{
p.ivz += dvox;
if (dvox > 0) {
if (p.ivz >= NZ) return 1;
p.z = 0;
}
else {
if (p.ivz < 0) return 1;
p.z = DZ;
}
}
else intersect = false;
break;
case 2:
if (p.v*(p.v - dvy) >= 0)
{
p.ivy += dvox;
if (dvox > 0) {
if (p.ivy >= NY) return 1;
p.y = 0;
}
else {
if (p.ivy < 0) return 1;
p.y = DY;
}
}
else intersect = false;
break;
default:
if (p.u*(p.u - dvx) >= 0)
{
p.ivx += dvox;
if (dvox > 0) {
if (p.ivx >= NX) return 1;
p.x = 0;
}
else {
if (p.ivx < 0) return 1;
p.x = DX;
}
}
else intersect = false;
}
}// end if (intersect)
//still intersect after the direction check, need to update the voxel density and index
// if (intersect) // in GPU, just update all even if some are not necessary
// {
p.iabsv = at(p.ivx, p.ivy, p.ivz);
voxden = getDensity(p);
/* }*/
// //update direction and keep going
// p.u += dvx;
// p.v += dvy;
// p.w += dvz;
}
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>>>>> end: phantom method definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<<<<<< start: Kernel definitions <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
__global__ void initThreads(int cardOffset)//init the random number generator, call it before any run
{
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
RNGState[it].init(cardOffset + it);
}
__device__ __forceinline__ bool refill(size_t it, ParticleR* pInit, ParticleStack& pStack, ParticleR& p, int& cur)
{
if (pStack.empty())
{
while (cur < NBatch)
{
p = pInit[it*NBatch + cur];
++cur;
if (!lineIn(p)) continue;
else return true;
}
return false;
}
else
{
p = pStack.top(); //it must be already in the phantom
pStack.pop();
return true;
}
}
__device__ void Rotate(ZFloat& ux, ZFloat& uy, ZFloat& uz, ZFloat costh, ZFloat cosph, ZFloat sinph) {
ZFloat costh2 = costh*costh;
ZFloat rho2 = ux * ux + uy * uy;
if (rho2 > 0 && costh2 < 1) {
ZFloat a = GlueF(sqrt)((1 - costh2) / rho2);
ZFloat xrho = ux * a;
ZFloat yrho = uy * a;
ZFloat ztmp = uz * cosph;
ux = ux * costh - yrho * sinph + ztmp * xrho;
uy = uy * costh + xrho * sinph + ztmp * yrho;
uz = uz * costh - rho2 * a * cosph;
}
else {
if (costh2 >= 1) {
if (costh < 0) {
ux = -ux; uy = -uy; uz = -uz;
}
return;
}
ZFloat b = GlueF(sqrt)(1 - costh2);
uy = b * sinph;
if (uz > 0) {
ux = b * cosph;
uz = costh;
}
else {
ux = -b * cosph;
uz = -costh;
}
}
}
// __device__ void Rotate(ZFloat& ux, ZFloat& uy, ZFloat& uz, ZFloat costh, GRNG& rng) //rotate with a random phi
// {
// ZFloat phi = 2 * PI*rng();
// ZFloat cphiCompt = GlueF(cos)(phi);
// ZFloat sphiCompt = (phi < PI ? 1 : -1)*GlueF(sqrt)(1 - cphiCompt*cphiCompt);
// }
__device__ void samcom(ZFloat energy, ZFloat &efrac, ZFloat &costh, GRNG& rng)
{
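// Samples a Compton event for a photon of the given energy: efrac (= br = k'/k) is the scattered-photon
// energy fraction and costh the scattering-angle cosine. For ko < 10 (photon energy in units of the
// electron rest energy) br is drawn uniformly in [1/(1+2*ko), 1] with a Klein-Nishina rejection; above
// that the usual two-term mixture (~1/br and ~br) with a sin^2(theta)-based rejection is used.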
ZFloat ko = energy*INV_ELECTRON_MASS;
ZFloat broi = 1 + 2 * ko; ZFloat bro = 1 / broi;
ZFloat br, temp;
if (ko < 10) {
// "low" energy case: uniformly between bro and bro1.
ZFloat bro1 = 1 - bro;
ZFloat ko2 = ko*ko;
ZFloat rejmax = ko2*(broi + bro);
ZFloat br2;
do {
br = bro + bro1*rng(); br2 = br*br;
} while (rng()*br2*rejmax > ko2*br*(br2 + 1) - (1 - br)*(br*broi - 1));
temp = (1 - br) / (ko*br);
}
else {
// "high" energy case: the usual way
ZFloat broi2 = broi*broi;
ZFloat alpha1 = GlueF(log)(broi);
ZFloat alpha2 = ko*(broi + 1)*bro*bro;
ZFloat alphaS = alpha1 + alpha2;
ZFloat sint;
do {
br = rng()*alphaS < alpha1 ? GlueF(exp)(alpha1*rng())*bro : GlueF(sqrt)(rng()*(broi2 - 1) + 1)*bro;
temp = (1 - br) / (ko*br); sint = temp*(2 - temp);
} while (rng()*(1 + br*br) < br*sint);
}
efrac = br;
costh = 1 - temp;
}
__device__ ZFloat samsca(ZFloat e, GRNG& rng)
{
// The screening parameter at this energy.
ZFloat ie = -1 / e;
ZFloat b = WaterScreenCS(ie);
ZFloat oneb = GlueF(1.0) + b;
// Determine energy bin of the pre-computed q surface.
// Instead of linearly interpolating between energies in each qsurf() evaluation below, we
// use the lower or higher energy of the bin in which the energy falls with the corresponding probability
int je; ZFloat pe;
WaterQSGetEnergyIndex1(ie, je, pe);
if (pe > 0)
{
if (rng() < pe) ++je;
}
ZFloat u;
while (1)
{
u = rng();//u is in [0, 1)
if (rng() < WaterQSurface(je, u)) break;
}
ZFloat mu = (oneb - u*(oneb + b)) / (oneb - u); // oneb>=1.0, so (oneb-u) is always positive
return mu;
}
__device__ void electronRun(ParticleR& p, GRNG& rng)
{
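// Condensed-history transport with a random energy hinge (a sketch of the loop below): each major step
// spends at most EMaxCSDA of energy, split by rng() into a pre-hinge part (fuel) and a post-hinge part
// (fuelxt); electronFlight() carries the electron to the hinge while depositing the CSDA energy loss,
// and the elastic multiple-scattering angle is then sampled at the hinge energy ebefor (similar in
// spirit to the PENELOPE-style random-hinge scheme).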
ZFloat fuelxt = 0, ebefor = 0, nextEnergy = 0;
while (true)
{
ebefor = p.E - fuelxt;
if (ebefor > EAbsElectron)
{
ZFloat Eloss = min(ebefor, EMaxCSDA);
ZFloat fuel = Eloss*rng();
nextEnergy = p.E - fuelxt - fuel;
if (nextEnergy <= ERangeCut) nextEnergy = 0;
fuelxt = Eloss - fuel;
}
else
{
if (p.E < ERangeCut) return;
nextEnergy = 0;
}
int status = electronFlight(p, nextEnergy);
if (status == 1) return; //below the cut-off energy or exit the phantom
// Check if we can discontinue transport because the electron cannot escape the current voxel
// ZFloat tp = tperp(p);
// ZFloat range = (p.E - ERangeCut)*WaterRangeCS(p.E);
// if (range < tp*getDensity(p))
// {
// deposit(p.iabsv, p.E*p.weight);
// return; //end the simulation of this electron
// }
//do elastic multi-scattering
ZFloat costhe = samsca(ebefor, rng);
if (costhe < 1)
{
ZFloat cphi, sphi;
randomAzimuth(rng(), cphi, sphi);
Rotate(p.u, p.v, p.w, costhe, cphi, sphi);
}
}
}
__device__ void photonRun(ParticleR& p, GRNG& rng, int& hasSec)
{
//this version put electron in stack and simulate later
while (true) //break until this photon is finished
{
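// Woodcock (fictitious-interaction) tracking: the flight distance is sampled using the maximum
// attenuation lamph*MaxDensity; the tentative interaction is then accepted as real with probability
// density/MaxDensity (rn < lamden*lamph), and the same rn also selects Compton vs. pair/photoelectric
// in proportion to the partial cross sections, so rejected events simply continue the flight.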
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat s = -lammin*GlueF(log)(rng());
if (photonFlight(p, s)) return; //finish simulating this photon
ZFloat lamden = lammin * getDensity(p);
ZFloat rn = rng();
if (rn < lamden*lamph)
{
ZFloat lamco = WaterComptonCS(p.E);
if (rn < lamden * lamco)// It's a Compton interaction
{
ParticleR pe = p; // the Compton electron starts from the photon's current state
ZFloat efracCompt = 1, costheCompt, cphiCompt, sphiCompt;
samcom(p.E, efracCompt, costheCompt, rng);
randomAzimuth(rng(), cphiCompt, sphiCompt);
// Electron energy
efracCompt = 1 - efracCompt;
pe.E = p.E*efracCompt;
// Compute Compton electron direction
ZFloat e0 = p.E * INV_ELECTRON_MASS;
ZFloat cost;
if (efracCompt > FLTEPSILON)
{
cost = (1 + e0) * GlueF(sqrt)(efracCompt / (e0*(2.0 + e0*efracCompt)));
if (cost > 1) cost = 1;
}
else cost = 0;
Rotate(pe.u, pe.v, pe.w, cost, -cphiCompt, -sphiCompt);
p.E -= pe.E; // the scattered photon's energy
electronRun(pe, rng);
if (p.E > EAbsPhoton)
{
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
}
else return;
}
else //should be pair production or photoelectric absorption
{
bool doPair = p.E > TEs; //only possible to do pair production
if (doPair)
{
ZFloat lampair = WaterPairCS(p.E);
if (rn > lamden*(lamco + lampair)) doPair = false;
}
if (doPair) //it's a pair production
{
ParticleR pe(p);
pe.type = electron;
ZFloat Epair1 = rng() * (p.E - TEs);
pe.E = Epair1;
electronRun(pe, rng);
pe.E = p.E - TEs - Epair1;
pe.copyDirection(p);
pe.copyPosition(p);
electronRun(pe, rng);
p.w = 2 * rng() - 1;
ZFloat sinthe = GlueF(sqrt)(1.0 - p.w*p.w);
ZFloat phi = 2 * PI * rng();
p.u = sinthe * GlueF(cos)(phi);
p.v = sinthe * GlueF(sin)(phi);
p.E = Es; //annihilation photon energy (Es, the electron rest energy)
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
StackBuff[NStackDepth*it] = p; //since the event is rare, we can use slow global memory
hasSec = 1;
//The other photon has opposite direction
p.u = -p.u;
p.v = -p.v;
p.w = -p.w;
}
else //it's a photoelectric absorption
{
p.type = electron;
electronRun(p, rng);
return;//finish simulating this photon
}
}
}
//else //reject the scattering and continue next move
}
}
__device__ void smartPhotonRun(ParticleR& p, GRNG& rng, int& hasSec)
{
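// Photon-splitting variance reduction: the photon is split into nsplit sub-photons of weight
// weight/nsplit; their flight distances are stratified over the exponential distribution via
// rnnoi = 1 - delta*(rnno1 + isplit), secondary electrons are transported immediately with the reduced
// weight, and only one randomly chosen copy (keepID) survives as the scattered/annihilation photon.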
int nsplit = NMaxSplit;
while (true)
{
if (!FixedSplit)
{
//splitting number
ZFloat e = p.E*GlueF(1e-6);
ZFloat e2 = e*e;
ZFloat pInitial = e > GlueF(0.3) ? GlueF(-0.053139) + GlueF(1.0695) * e - GlueF(0.24783) * e2 + GlueF(0.028566) * e*e2 : GlueF(0.25);
nsplit = (int)(pInitial*NMaxSplit);
}
ZFloat rnno1 = rng();
ZFloat rnno2 = rng();
ZFloat delta = 1 / ZFloat(nsplit);
ZFloat eweight = p.weight*delta;
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = -1;
ZFloat lampair = -1;
int keepID = (int)(rng()*nsplit);
// These will be used to remember the relevant Compton scattering variables
ZFloat eCompElec = -1;
ZFloat eu = p.u, ev = p.v, ew = p.w;
ZFloat costheCompt, cphiCompt, sphiCompt; //used to calculate the direction of the scattered photon
ZFloat Epair1 = -1; // remember the pair production energy
ParticleR pOld = p; // remember the initial status
p.E = 0; //default to exit
for (int isplit = 0; isplit < nsplit; ++isplit)
{
ParticleR pi = pOld; //start with the initial status
ZFloat rnnoi = 1 - delta*(rnno1 + isplit);
if (rnnoi <= 0) break;
//ZFloat s = lammin*lambdaCalculator(rnnoi);
ZFloat s = -lammin*GlueF(log)(rnnoi);
if (photonFlight(pi, s)) break;
// update lammin with density in the voxel and get material id.
ZFloat lamden = lammin * getDensity(pi);
if (lamph < 0) lamph = WaterPhotonCS(pi.E); // it's so weird that this statement improved the performance
// Check if a real interaction
if (rnno2 < lamden*lamph) // yes.
{
if (lamco < 0) lamco = WaterComptonCS(pi.E);
if (rnno2 < lamden * lamco) // It's a Compton interaction
{
if (eCompElec < 0) // Haven't sampled a Compton interaction yet, so do it now.
{
//keepCompton = (int)(rng()*nsplit);
// Sample the interaction
ZFloat efracCompt = 1;
samcom(pi.E, efracCompt, costheCompt, rng);
randomAzimuth(rng(), cphiCompt, sphiCompt);
// Electron energy
efracCompt = 1 - efracCompt;
eCompElec = pi.E*efracCompt;
// Compute Compton electron direction
ZFloat e0 = pi.E * INV_ELECTRON_MASS;
//ZFloat efrac1 = 1 - efracCompt;
ZFloat cost;
if (efracCompt > FLTEPSILON)
{
cost = (1 + e0) * GlueF(sqrt)(efracCompt / (e0*(2.0 + e0*efracCompt)));
if (cost > 1) cost = 1;
}
else cost = 0;
Rotate(eu, ev, ew, cost, -cphiCompt, -sphiCompt);
}
//if (isplit == keepCompton && eCompGamma > EAbsPhoton)
if (isplit == keepID)
{
ZFloat eCompGamma = pi.E - eCompElec;
p.E = eCompGamma;
if (eCompGamma > EAbsPhoton)
{
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
// Now, instead of first pushing the Compton electron onto the stack and later getting it back, we simply transport it here.
pi.type = electron;
pi.E = eCompElec;
pi.weight = eweight;
pi.u = eu;
pi.v = ev;
pi.w = ew;
electronRun(pi, rng);
}
else // Not a Compton, so check if pair or photo.
{
bool doPair = pi.E > TEs;
if (doPair)
{
if (lampair < 0) lampair = WaterPairCS(pi.E);
if (rnno2 > lamden*(lamco + lampair)) doPair = false;
}
if (doPair) // It's a pair production -> the photon disappears (but we add annihilation photons below as needed).
{
if (Epair1 < 0) // Haven't sampled a pair event yet, so do it now.
{
Epair1 = rng() * (pi.E - TEs);
}
if (isplit == keepID)
{
p.w = 2 * rng() - 1;
ZFloat sinthe = GlueF(sqrt)(1.0 - p.w*p.w);
ZFloat phi = 2 * PI * rng();
p.u = sinthe * GlueF(cos)(phi);
p.v = sinthe * GlueF(sin)(phi);
p.E = Es; //annihilation photon energy (Es, the electron rest energy)
//copy the position
p.x = pi.x; p.y = pi.y; p.z = pi.z;
p.ivx = pi.ivx; p.ivy = pi.ivy; p.ivz = pi.ivz; p.iabsv = pi.iabsv;
size_t it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
StackBuff[NStackDepth*it] = p; //since the event is rare, we can use slow global memory
hasSec = 1;
//The other photon has opposite direction
p.u = -p.u;
p.v = -p.v;
p.w = -p.w;
}
// ParticleR* pars = (ParticleR*)sharedMem;
// ParticleR& pb = pars[threadIdx.x]; //current particle to process
// pb = pi;
// Put an e+/e- pair on the stack. We do not distinguish between electrons and positrons at this point.
ZFloat bx = pi.x; ZFloat by = pi.y; ZFloat bz = pi.z; //backup position
int bix = pi.ivx; int biy = pi.ivy; int biz = pi.ivz; int biabs = pi.iabsv;
pi.type = electron;
pi.E = Epair1;
pi.weight = eweight;
electronRun(pi, rng);
//restore the position, direction
pi.x = bx; pi.y = by; pi.z = bz;
pi.ivx = bix; pi.ivy = biy; pi.ivz = biz; pi.iabsv = biabs;
pi.u = pOld.u; pi.v = pOld.v; pi.w = pOld.w;
pi.E = pOld.E - TEs - Epair1;
// pi = pb;
// pi.E = pOld.E - TEs - Epair1;
electronRun(pi, rng);
}
else
{
// It's a photo absorption -> the photon disappears
pi.type = electron;
pi.weight = eweight;
electronRun(pi, rng);
}
}
}
else // The interaction was rejected.
{
if (isplit == keepID) //copy the position, and energy
{
p.x = pi.x; p.y = pi.y; p.z = pi.z;
p.ivx = pi.ivx; p.ivy = pi.ivy; p.ivz = pi.ivz; p.iabsv = pi.iabsv;
p.E = pi.E;
}
}
}
if (p.E < EAbsPhoton) return;
}
}
__device__ void ComptonPhotonRun(ParticleR& p, GRNG &rng)
{
while (true)
{
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = WaterComptonCS(p.E);
ZFloat s = -lammin*GlueF(log)(rng());
if (photonFlight(p, s)) return;
// update lammin with density in the voxel and get material id.
ZFloat lamden = lammin * getDensity(p);
ZFloat rnno = rng();
// Check if a real interaction
if (rnno < lamden*lamph) // yes.
{
if (rnno < lamden * lamco) // It's a Compton interaction
{
ZFloat efracCompt, costheCompt;
samcom(p.E, efracCompt, costheCompt, rng);
//deposit the energy of electron
ZFloat eCompGamma = efracCompt*p.E;
deposit(p.iabsv, (p.E - eCompGamma)*p.weight);
if (eCompGamma > EAbsPhoton)
{
ZFloat cphiCompt, sphiCompt;
randomAzimuth(rng(), cphiCompt, sphiCompt);
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
p.E = eCompGamma;
}
else
{
//deposit(p.iabsv, eCompGamma*p.weight);
return;
}
}
else // Not a Compton, deposit all energy here
{
deposit(p.iabsv, p.E*p.weight);
return;
}
}
}
}
__device__ void SmartComptonPhotonRun(ParticleR& p, GRNG &rng)
{
int nsplit = NMaxSplit;
while (true)
{
if (!FixedSplit)
{
//splitting number: empirical fit of the splitting fraction as a function of photon energy in MeV
ZFloat e = p.E*GlueF(1e-6);
ZFloat e2 = e*e;
ZFloat pInitial = e > GlueF(0.3) ? GlueF(-0.053139) + GlueF(1.0695) * e - GlueF(0.24783) * e2 + GlueF(0.028566) * e*e2 : GlueF(0.25);
nsplit = (int)(pInitial*NMaxSplit);
}
ZFloat rnno1 = rng();
ZFloat rnno2 = rng();
ZFloat delta = 1 / ZFloat(nsplit);
ZFloat eweight = p.weight*delta;
ZFloat lamph = WaterPhotonCS(p.E);
ZFloat lammin = 1 / (lamph*MaxDensity);
ZFloat lamco = WaterComptonCS(p.E);
int keepID = (int)(rng()*nsplit);
// These will be used to remember the relevant Compton scattering variables
ZFloat costheCompt = 0, eCompElec = -1;
//remember initial position
ZFloat px = p.x, py = p.y, pz = p.z;
int pivx = p.ivx, pivy = p.ivy, pivz = p.ivz;
ParticleR pi = p; //the direction of pi will not change
p.E = 0; //default to lose all energy and exit
for (int isplit = 0; isplit < nsplit; ++isplit)
{
//reset the position
pi.x = px;
pi.y = py;
pi.z = pz;
pi.ivx = pivx;
pi.ivy = pivy;
pi.ivz = pivz;
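//stratified flight sampling: the formula below places rnnoi in the isplit-th
//sub-interval of width delta, so the nsplit split photons together cover the
//whole exponential path-length distribution using the single random number rnno1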
ZFloat rnnoi = 1 - delta*(1 - rnno1 + isplit);
//ZFloat s = lammin*lambdaCalculator(rnnoi); //this implementation is slower than calling log directly
ZFloat s = -lammin*GlueF(log)(rnnoi);
if (photonFlight(pi, s)) break;
ZFloat lamden = lammin*getDensity(pi);
// Check if a real interaction
if (rnno2 < lamden*lamph) // yes.
{
if (rnno2 < lamden * lamco) // It's a Compton interaction
{
if (eCompElec < 0) // Haven't sampled a Compton interaction yet, so do it now.
{
ZFloat efracCompt = 1;
samcom(pi.E, efracCompt, costheCompt, rng);
eCompElec = pi.E*(1 - efracCompt);
}
if (isplit == keepID)
{
p.E = pi.E - eCompElec;
if (p.E > EAbsPhoton)
{
ZFloat cphiCompt, sphiCompt;
randomAzimuth(rng(), cphiCompt, sphiCompt);
Rotate(p.u, p.v, p.w, costheCompt, cphiCompt, sphiCompt);
//copy position
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
deposit(pi.iabsv, eCompElec*eweight); //deposit the energy of the electron
}
else // Not a Compton, deposit all energy here
{
deposit(pi.iabsv, pi.E*eweight);
}
}
else // The interaction was rejected.
{
if (isplit == keepID)
{
//copy energy and position. Direction is actually unchanged
p.E = pi.E;
p.x = pi.x;
p.y = pi.y;
p.z = pi.z;
p.ivx = pi.ivx;
p.ivy = pi.ivy;
p.ivz = pi.ivz;
p.iabsv = pi.iabsv;
}
}
}
if (p.E < EAbsPhoton)
{
//if (p.E > 0) deposit(p.iabsv, p.E*p.weight);
return;
}
}
}
__global__ void
//__launch_bounds__(128, 16)
gZeusSmartRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
//ParticleR* pars = (ParticleR*)sharedMem;
//ParticleR& p = pars[threadIdx.x]; //current particle to process
ParticleR p; //current particle to process
int hasSec = 0;
int cur = 0;//reset the current particle index
int NT = blockDim.x * gridDim.x;
while (true)
{
//first try to fetch particle from particle stack, and then try to fetch from the particle buffer
if (hasSec)
{
p = StackBuff[NStackDepth*it];
hasSec = 0;
}
else //fetch from the buffer
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[cur*NT + it];
//p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
}
smartPhotonRun(p, rng, hasSec);
}
}
__global__ void gZeusRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
//ParticleR* pars = (ParticleR*)sharedMem;
//ParticleR& p = pars[threadIdx.x]; //current particle to process
ParticleR p; //current particle to process
int hasSec = 0;
int cur = 0;//reset the current particle index
while (true)
{
//first try to fetch particle from particle stack, and then try to fetch from the particle buffer
if (hasSec)
{
p = StackBuff[NStackDepth*it];
hasSec = 0;
}
else //fetch from the buffer
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
}
photonRun(p, rng, hasSec);
}
}
__global__ void gZeusComptonRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
ParticleR p; //current particle to process
int cur = 0;//reset the current particle index
while (true)
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[it*NBatch + cur];
++cur;
if (lineIn(p)) break;
}
ComptonPhotonRun(p, rng);
}
}
__global__ void gZeusSmartComptonRun(ParticleR* pInit) //let's implement in a simple way first
{
unsigned int it = blockIdx.x * blockDim.x + threadIdx.x; //thread index
GRNG rng = RNGState[it]; //get the RNG status
ParticleR p; //current particle to process
int cur = 0;//reset the current particle index
int NT = blockDim.x * gridDim.x;
while (true)
{
while (true)
{
if (cur == NBatch) // exhausted the buffer
{
RNGState[it] = rng; //record the status of current RNG
return; // exit this thread
}
p = pInit[cur*NT + it];
++cur;
if (lineIn(p)) break;
}
SmartComptonPhotonRun(p, rng);
}
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>>>>> end: Kernel definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<< start: tool functions of cuda <<<<<<<<<<<<<<<<<<<<<<<<<<<<<*/
void cudaErrorCheck(cudaError_t cudaStatus, const char* func)
{
if (cudaSuccess != cudaStatus)
{
if (func) Log("cuda error: %s in function %s\n", cudaGetErrorString(cudaStatus), func);
else Log("cuda error: %s\n", cudaGetErrorString(cudaStatus));
exitApp("cuda function call failed!");
}
}
void inline cudaKernelCheck(int i, int it)
{
#ifdef CUDA_KERNEL_CHECK
#ifdef DEBUG
cudaDeviceSynchronize();//make sure all kernels are finished
cudaError_t cudaStatus = cudaGetLastError();
if (cudaSuccess != cudaStatus)
{
if (it != -1) Log("thread id %d with cuda kernel number %d error: %s", it, i, cudaGetErrorString(cudaStatus));
else Log("cuda kernel number %d error: %s", i, cudaGetErrorString(cudaStatus));
exitApp("cuda execuation error in executeJob()");
}
#endif
#endif
}
int ConvertSMVer2Cores(int major, int minor)
{
// Defines for GPU Architecture types (using the SM version to determine the # of cores per SM)
typedef struct
{
int SM; // 0xMm (hexadecimal notation), M = SM Major version, and m = SM minor version
int Cores;
} sSMtoCores;
sSMtoCores nGpuArchCoresPerSM[] =
{
{ 0x10, 8 }, // Tesla Generation (SM 1.0) G80 class
{ 0x11, 8 }, // Tesla Generation (SM 1.1) G8x class
{ 0x12, 8 }, // Tesla Generation (SM 1.2) G9x class
{ 0x13, 8 }, // Tesla Generation (SM 1.3) GT200 class
{ 0x20, 32 }, // Fermi Generation (SM 2.0) GF100 class
{ 0x21, 48 }, // Fermi Generation (SM 2.1) GF10x class
{ 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class
{ 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class
{ 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class
{ 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class
{ 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class
{ -1, -1 }
};
int index = 0;
while (nGpuArchCoresPerSM[index].SM != -1)
{
if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
{
return nGpuArchCoresPerSM[index].Cores;
}
index++;
}
// If we don't find the value, default to the previous entry so the program can still run
printf("MapSMtoCores for SM %d.%d is undefined. Defaulting to %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores);
return nGpuArchCoresPerSM[index - 1].Cores;
}
int cudaGetMaxGflopsDeviceID(vector<GPUConfig>& gc) // This function returns the best GPU (with maximum GFLOPS)
{
int current_device = 0, sm_per_multiproc = 0;
int best_SM_arch = 0;
int devices_prohibited = 0;
unsigned long long max_compute_perf = 0;
cudaDeviceProp deviceProp;
// Find the best major SM Architecture GPU device
for (unsigned int i = 0; i < gc.size(); ++i)
{
current_device = gc[i].id;
cudaGetDeviceProperties(&deviceProp, current_device);
// Only consider GPUs that are not in Compute Mode Prohibited
if (deviceProp.computeMode != cudaComputeModeProhibited)
{
if (deviceProp.major > 0 && deviceProp.major < 9999)
{
best_SM_arch = max(best_SM_arch, deviceProp.major);
}
}
else
{
devices_prohibited++;
}
}
// Find the best CUDA capable GPU device
int gc_i = 0;
for (unsigned int i = 0; i < gc.size(); ++i)
{
current_device = gc[i].id;
cudaGetDeviceProperties(&deviceProp, current_device);
// Only consider GPUs that are not in Compute Mode Prohibited
if (deviceProp.computeMode != cudaComputeModeProhibited)
{
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
if (compute_perf > max_compute_perf)
{
// If a GPU with SM major > 2 was found, only consider devices of that architecture
if (best_SM_arch > 2)
{
// If this device matches best_SM_arch, choose it; otherwise skip it
if (deviceProp.major == best_SM_arch)
{
max_compute_perf = compute_perf;
gc_i = i;
}
}
else
{
max_compute_perf = compute_perf;
gc_i = i;
}
}
else if (compute_perf == max_compute_perf)
{
if (gc[gc_i].id == 0) gc_i = i; //if the GPUs' FLOPS are identical, avoid device 0 because the OS uses it preferentially
}
}
}
return gc_i; // max_perf_device;
}
bool speedCompareFunc(pair<unsigned long long, int> lhs, pair<unsigned long long, int> rhs)
{
return lhs.first > rhs.first;
}
void cudaGetGflopsList(vector<int>& speedList) // This function builds a list of device IDs sorted by estimated GFLOPS, fastest first
{
cudaDeviceProp deviceProp;
int devCount = 0;
cudaErrorCheck(cudaGetDeviceCount(&devCount));
if (devCount < 1) exitApp("Cannot find any CUDA-capable GPU on your computer!");
vector<pair<unsigned long long, int>> speed;
// Find the best major SM Architecture GPU device
for (int i = 0; i < devCount; ++i)
{
cudaGetDeviceProperties(&deviceProp, i);
// Only consider GPUs that are not in Compute Mode Prohibited
if (deviceProp.computeMode != cudaComputeModeProhibited)
{
int sm_per_multiproc = 1;
if (deviceProp.major == 9999 && deviceProp.minor == 9999)
{
sm_per_multiproc = 1;
}
else
{
sm_per_multiproc = ConvertSMVer2Cores(deviceProp.major, deviceProp.minor);
}
unsigned long long compute_perf = (unsigned long long) deviceProp.multiProcessorCount * sm_per_multiproc * deviceProp.clockRate;
speed.push_back(make_pair(compute_perf, i));
}
}
sort(speed.begin(), speed.end(), speedCompareFunc);
int ng = (int)speed.size();
speedList.resize(ng);
for (int i = 0; i < ng; ++i) speedList[i] = speed[i].second;
}
void printGPUProperties(int i)
{
// Get device properties
Log("\nCUDA Device #%d\n", i);
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
Log("Major revision number: %d\n", devProp.major);
Log("Minor revision number: %d\n", devProp.minor);
Log("Name: %s\n", devProp.name);
Log("Total global memory: %lu\n", devProp.totalGlobalMem);
Log("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
Log("Total registers per block: %d\n", devProp.regsPerBlock);
Log("Warp size: %d\n", devProp.warpSize);
Log("Maximum memory pitch: %lu\n", devProp.memPitch);
Log("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 0; i < 3; ++i)
Log("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
for (int i = 0; i < 3; ++i)
Log("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
Log("Clock rate: %d\n", devProp.clockRate);
Log("Total constant memory: %lu\n", devProp.totalConstMem);
Log("Texture alignment: %lu\n", devProp.textureAlignment);
Log("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
Log("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
Log("Number of total cores: %d\n", ConvertSMVer2Cores(devProp.major, devProp.minor)*devProp.multiProcessorCount);
Log("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>> end: tool functions of cuda >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
/*<<<<<<<<<<<<<<<<<<<<<<<<< start: GZEUS method definitions <<<<<<<<<<<<<<<<<<<<<<<<<<*/
void GZEUS::phantom2GPU() //copy phantom info to GPU. Make sure the phantom has loaded data
{
int NVoxel = _phant->getVoxelNum();
for (unsigned int i = 0; i < gc.size(); ++i)
{
cudaErrorCheck(cudaSetDevice(gc[i].id));
//resize data in the GPU
cudaErrorCheck(cudaMalloc(&gc[i].d_ph, NVoxel*sizeof(SFloat)),"resize ph");
cudaErrorCheck(cudaMemcpyToSymbol(ph, &gc[i].d_ph, sizeof(SFloat *)),"copy ph pointer to GPU constant"); //set the array pointer
cudaErrorCheck(cudaMalloc(&gc[i].d_doseScore, NVoxel*sizeof(SFloat)),"resize doseScore");
cudaErrorCheck(cudaMemcpyToSymbol(doseScore, &gc[i].d_doseScore, sizeof(SFloat *)), "copy doseScore pointer to GPU constant"); //set the array pointer
//set the initial value of phantom and dose counter
cudaErrorCheck(cudaMemcpy(gc[i].d_ph, _phant->ph.getP(), sizeof(SFloat)*NVoxel, cudaMemcpyHostToDevice), "init the value of ph in GPU");
cudaErrorCheck(cudaMemcpy(gc[i].d_doseScore, _phant->dose.getP(), sizeof(SFloat)*NVoxel, cudaMemcpyHostToDevice), "init the value of doseScore in GPU");
//copy the rest constant
cudaErrorCheck(cudaMemcpyToSymbol(NX, &_phant->NX, sizeof(int)), "copy NX to GPU");
cudaErrorCheck(cudaMemcpyToSymbol(NY, &_phant->NY, sizeof(int)), "copy NY to GPU");
cudaErrorCheck(cudaMemcpyToSymbol(NZ, &_phant->NZ, sizeof(int)), "copy NZ to GPU");
ZFloat temp = (ZFloat)_phant->DX;
cudaErrorCheck(cudaMemcpyToSymbol(DX, &temp, sizeof(ZFloat)), "copy DX to GPU");
temp = (ZFloat)(1/_phant->DX);
cudaErrorCheck(cudaMemcpyToSymbol(InvDX, &temp, sizeof(ZFloat)), "copy InvDX to GPU");
temp = (ZFloat)_phant->DY;
cudaErrorCheck(cudaMemcpyToSymbol(DY, &temp, sizeof(ZFloat)), "copy DY to GPU");
temp = (ZFloat)(1 / _phant->DY);
cudaErrorCheck(cudaMemcpyToSymbol(InvDY, &temp, sizeof(ZFloat)), "copy InvDY to GPU");
temp = (ZFloat)_phant->DZ;
cudaErrorCheck(cudaMemcpyToSymbol(DZ, &temp, sizeof(ZFloat)), "copy DZ to GPU");
temp = (ZFloat)(1 / _phant->DZ);
cudaErrorCheck(cudaMemcpyToSymbol(InvDZ, &temp, sizeof(ZFloat)), "copy InvDZ to GPU");
temp = (ZFloat)_phant->LX;
cudaErrorCheck(cudaMemcpyToSymbol(LX, &temp, sizeof(ZFloat)), "copy LX to GPU");
temp = (ZFloat)_phant->LY;
cudaErrorCheck(cudaMemcpyToSymbol(LY, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->LZ;
cudaErrorCheck(cudaMemcpyToSymbol(LZ, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->xo;
cudaErrorCheck(cudaMemcpyToSymbol(xo, &temp, sizeof(ZFloat)), "copy xo to GPU");
temp = (ZFloat)_phant->yo;
cudaErrorCheck(cudaMemcpyToSymbol(yo, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->zo;
cudaErrorCheck(cudaMemcpyToSymbol(zo, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->Bx;
cudaErrorCheck(cudaMemcpyToSymbol(Bx, &temp, sizeof(ZFloat)), "copy Bx to GPU");
temp = (ZFloat)_phant->By;
cudaErrorCheck(cudaMemcpyToSymbol(By, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->Bz;
cudaErrorCheck(cudaMemcpyToSymbol(Bz, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->MaxDensity;
cudaErrorCheck(cudaMemcpyToSymbol(MaxDensity, &temp, sizeof(ZFloat)));
temp = (ZFloat)_phant->rf;
cudaErrorCheck(cudaMemcpyToSymbol(rf, &temp, sizeof(ZFloat)));
cudaErrorCheck(cudaMemcpyToSymbol(uniform, &_phant->uniform, sizeof(int)), "copy uniform to GPU");
}
}
void GZEUS::initGPU()
{
RunTimeCounter rc;
if (!initWaterCS("ZeusCrossSections.binary", gc)) exitApp("Cannot initialize the water cross sections");
int NGPU = (int)gc.size();
ConfigFile* zcf = _cf->getBlock("ZEUS");
if (zcf == NULL) zcf = _cf->getBlock("PHANTOM"); //if the ZEUS block cannot be found, fall back to the PHANTOM block's default values
int split = 50, h_NStackDepth = 40;
zcf->getValue("NMaxSplit", split);
zcf->getValue("Particle stack depth", h_NStackDepth);
string str = "yes";
int h_fixedSplit = 1; //default yes
if (zcf->getValue("Fixed Split", str) && str.compare("yes") != 0) h_fixedSplit = 0;
int h_simuElectron = 1; //default yes
if (zcf->getValue("Simulate electron", str) && str.compare("yes") != 0) h_simuElectron = 0;
int h_forwardDetection = 1;//default yes
if (zcf->getValue("Forward detection", str) && str.compare("yes") != 0) h_forwardDetection = 0;
double fv = 0;
ZFloat h_EAbsPhoton = 50e3; //unit eV
if (zcf->getValue("EAbsPhoton", fv)) h_EAbsPhoton = (ZFloat)fv;
ZFloat h_EAbsElectron = 50e3; //unit eV
if (zcf->getValue("EAbsElectron", fv)) h_EAbsElectron = (ZFloat)fv;
ZFloat h_EMaxCSDA = 200e3; //unit eV
if (zcf->getValue("EMaxCSDA", fv)) h_EMaxCSDA = (ZFloat)fv;
for (int i = 0; i < NGPU; ++i) //for each GPU
{
cudaErrorCheck(cudaSetDevice(gc[i].id));
int NGPUThread = gc[i].NBlock*gc[i].BlockSize;
//resize initial particle memory in GPU
cudaErrorCheck(cudaMalloc(&gc[i].d_InitParsA, NGPUThread * gc[i].NBatch * sizeof(ParticleR)));
cudaErrorCheck(cudaMemcpyToSymbol(InitParsA, &gc[i].d_InitParsA, sizeof(ParticleR *)));
cudaErrorCheck(cudaMalloc(&gc[i].d_InitParsB, NGPUThread * gc[i].NBatch * sizeof(ParticleR)));
cudaErrorCheck(cudaMemcpyToSymbol(InitParsB, &gc[i].d_InitParsB, sizeof(ParticleR *)));
//resize memory for the particle stack in GPU
cudaErrorCheck(cudaMalloc(&gc[i].d_stackBuff, NGPUThread * h_NStackDepth* sizeof(ParticleR)));
cudaErrorCheck(cudaMemcpyToSymbol(StackBuff, &gc[i].d_stackBuff, sizeof(ParticleR*)));
cudaErrorCheck(cudaMemcpyToSymbol(NStackDepth, &h_NStackDepth, sizeof(int)));
//resize memory for the GRNG status in GPU
cudaErrorCheck(cudaMalloc(&gc[i].d_RNGState, NGPUThread* sizeof(GRNG)));
cudaErrorCheck(cudaMemcpyToSymbol(RNGState, &gc[i].d_RNGState, sizeof(GRNG*)));
cudaErrorCheck(cudaMemcpyToSymbol(NBatch, &gc[i].NBatch, sizeof(int)));
cudaErrorCheck(cudaMemcpyToSymbol(NMaxSplit, &split, sizeof(int)));
cudaErrorCheck(cudaMemcpyToSymbol(FixedSplit, &h_fixedSplit, sizeof(int)));
cudaErrorCheck(cudaMemcpyToSymbol(SIMU_ELECTRON, &h_simuElectron, sizeof(int)));
cudaErrorCheck(cudaMemcpyToSymbol(ForwardDetect, &h_forwardDetection, sizeof(int)));
cudaErrorCheck(cudaMemcpyToSymbol(EAbsPhoton, &h_EAbsPhoton, sizeof(ZFloat)));
cudaErrorCheck(cudaMemcpyToSymbol(EAbsElectron, &h_EAbsElectron, sizeof(ZFloat)));
cudaErrorCheck(cudaMemcpyToSymbol(EMaxCSDA, &h_EMaxCSDA, sizeof(ZFloat)));
}
Log("\nIt costs %f seconds to init GPU ", rc.stop());
}
void GZEUS::freeGPU()//do some clean up
{
for (unsigned int i = 0; i < gc.size(); ++i)
{
cudaErrorCheck(cudaSetDevice(gc[i].id));
cudaErrorCheck(cudaFree(gc[i].d_InitParsA));
cudaErrorCheck(cudaFree(gc[i].d_InitParsB));
cudaErrorCheck(cudaFree(gc[i].d_stackBuff));
cudaErrorCheck(cudaFree(gc[i].d_RNGState));
cudaErrorCheck(cudaFree(gc[i].d_ph));
cudaErrorCheck(cudaFree(gc[i].d_doseScore));
gc[i].destroyStream();
}
}
void GZEUS::init(ConfigFile* cf)
{
_cf = cf;
initGPU();
ConfigFile* ph_cf = cf->getBlock("PHANTOM");
_phant = new Phantom;
_phant->loadPhantom(ph_cf);
string lastDoseFile;
if (cf->getValue("proceed last simulation", lastDoseFile) && lastDoseFile.compare("yes") == 0)
{
cf->getValue("output file name", lastDoseFile);
lastDoseFile += ".dose";
if (!_phant->previousDose(lastDoseFile.c_str())) exitApp("Cannot load last dose file to continue the simulation!");
else Log("load last dose file successfully with %.0f existing histories", _phant->getHist());
}
SourceHead_GetPrescrition(&(_phant->prescriptionDose), &(_phant->treatmentFraction));
phantom2GPU();
}
int GZEUS::getGPUConfig(ConfigFile* gcf)
{
if (NULL == gcf) exitApp("cannot find GPU configuration!");
int devCount = 0;
cudaErrorCheck(cudaGetDeviceCount(&devCount));
if (devCount < 1) exitApp("Cannot find any CUDA-capable GPU on your computer!");
string GPU_Query;
gcf->getValue("GPU Query", GPU_Query);
if (GPU_Query.compare("yes") == 0)
{
int devCount;
cudaErrorCheck(cudaGetDeviceCount(&devCount));
Log("There are %d CUDA devices listed as follow:\n", devCount);
for (int i = 0; i < devCount; ++i) printGPUProperties(i);
printf("\nDo you want to continue executing GPU computation? y/n\n");
if (getchar() != 'y') exit(0);
}
int NBlock = 128, BlockSize = 256, NBatch = 100, GRNG_Refill_Period = 70, Source_Reuse_Times = 10;
string rngStat;
gcf->getValue("GPU Block Num", NBlock);
gcf->getValue("GPU Block Dim", BlockSize);
gcf->getValue("GPU Batch Num", NBatch);
gcf->getValue("GPU RNG Statistic", rngStat);
gcf->getValue("GRNG Refill Period", GRNG_Refill_Period);
gcf->getValue("Source Reuse Times", Source_Reuse_Times);
//double GPU_Weight = 0;
GPUConfig gpuc;
//gpuc.id = GPU_Index;
gpuc.NBlock = NBlock;
gpuc.BlockSize = BlockSize;
gpuc.NBatch = NBatch;
gpuc.refillPeriod = GRNG_Refill_Period;
gpuc.SourceReuseTimes = Source_Reuse_Times;
vector<int> GPU_in_speed;
cudaGetGflopsList(GPU_in_speed);
vector<int> GPU_Index;
if (!gcf->getValue("GPU Index", GPU_Index)) //no specific GPU index
{
int NGPU = 0;
gcf->getValue("GPU Num", NGPU);
if (NGPU <= 0) exitApp("Invalid GPU index configuration!");
int NGPUAvailable = (int)GPU_in_speed.size();
for (int i = 0; i < NGPU; ++i)
{
if (i < NGPUAvailable) GPU_Index.push_back(GPU_in_speed[i]);
else break;
}
}
for (unsigned int i = 0; i < GPU_Index.size(); ++i)
{
if (GPU_Index[i] >= 0 && GPU_Index[i] < devCount)
{
gpuc.id = GPU_Index[i];
gc.push_back(gpuc);
}
else exitApp("Invalid GPU index");
}
//find the best GPU as the main thread, and optimize the work load
int main_id = 0;
if (gc.size() > 1) main_id = cudaGetMaxGflopsDeviceID(gc);
Log("/******************* The following GPU will be used ***************************/");
for (unsigned int i = 0; i < gc.size(); ++i) printGPUProperties(gc[i].id);
Log("/************************ End GPU description *********************************/\n\n");
//create streams of GPU control
for (unsigned int i = 0; i < gc.size(); ++i) gc[i].createStream();
return main_id;
}
/*>>>>>>>>>>>>>>>>>>>>>>>>>> end: GZEUS method definitions >>>>>>>>>>>>>>>>>>>>>>>>>>>>*/
RunTimeCounter sourceCounter;
void getSource(SourcePool* sp, volatile int* hist)
{
sourceCounter.start();
*hist = sp->prepareCopy();//prepare one batch before any run
Log("time cost to generate particle = %f s", sourceCounter.stop());
}
int NThread, NProcess, pID;
bool b_peek = false;
BinaryFile gBF; //to store the raw dose data
ProgressCallBack progressCB = NULL;
PeekDoseCallBack peekDoseCB = NULL;
JobFinishedCallBack jobFinishedCB = NULL;
LogCallBack exitCB = NULL;
// Begin: essential for command line call
bool b_thread_active = false;
bool b_abort = false;
#ifdef WIN32
#define DEXPORT __declspec(dllexport)
#else
#define DEXPORT __attribute__ ((visibility ("default")))
#endif
extern "C" DEXPORT void startSimulation(const char* configFileName, MPS& configMacro, bool bWait) //launch a thread to do the simulation
{
b_thread_active = true;
std::thread thd(executeJob, configFileName, 1, std::ref(configMacro));
if (bWait) thd.join();
else thd.detach();
}
extern "C" DEXPORT void stopSimulation()
{
if (b_thread_active) b_abort = true; //only if the thread is on
}
// End: essential for command line call
extern "C" DEXPORT void peekDose() //mark to generate intermediate dose
{
if (b_thread_active) b_peek = true; //only if the thread is on
}
//interfaces to register the call-back function
extern "C" DEXPORT void setProgressCallBack(ProgressCallBack pcb){ progressCB = pcb; }
extern "C" DEXPORT void setPeekDoseCallBack(PeekDoseCallBack pcb){ peekDoseCB = pcb; }
extern "C" DEXPORT void setJobFinishedCallBack(JobFinishedCallBack jcb){ jobFinishedCB = jcb; }
extern "C" DEXPORT void setLogCallBack(LogCallBack lcb){ Log.setCallBack(lcb); }
extern "C" DEXPORT void setExitCallBack(LogCallBack ecb){ exitCB = ecb; }
void exitApp(const char *inf)
{
Log("fatal error: %s", inf);
Log.flush(); //flush the log file's buffer
if (exitCB) exitCB(inf); // let the call back function display the error
else
{
Log("\nPress enter key to exit...");
getchar();
}
exit(-1);
}
void executeJob(const char* configFileName, double processWeight, MPS& configMacro) //execute one job according to the config file
{
RunTimeCounter totalTime;
ConfigFile cf(configFileName); //parse the total config file
//find out where is the config file located
string cdir(configFileName);
size_t pos = cdir.find_last_of("\\/");
cdir.erase(++pos, string::npos);
cf.macroReplace(string("$cdir$"), cdir);
cf.macroReplace(configMacro);
string logDir, logAppend, logDescription;
cf.getValue("log file directory", logDir);
cf.getValue("log file append", logAppend);
cf.getValue("log description", logDescription);
if (0 == logAppend.compare("no")) logAppend = "w";
else logAppend = "a";
Log.setLogName(pID, logAppend, logDir);//start log recording for this job
Log("The job config file name = %s", configFileName);
if (logDescription.compare("NA") != 0) Log("Short description: %s", logDescription.c_str());
Log("Start log time = %s\n\n", Log.timeNow());
double fNSIMU = 1e7;
cf.getValue("NSIMU", fNSIMU);
fNSIMU *= processWeight; //real workload on this node
double targetErr = -1;
cf.getValue("target uncertainty", targetErr);
double targetErr2 = targetErr*targetErr;
double thresholdRegion = 0.5; //default 50% of the max dose as threshold for uncertainty calculation
cf.getValue("threshold region", thresholdRegion);
string outname;
cf.getValue("output file name", outname);
ConfigFile* zcf = cf.getBlock("ZEUS");
if (zcf == NULL) zcf = cf.getBlock("PHANTOM"); //if the ZEUS block cannot be found, fall back to the PHANTOM block's default values
int nsplit = 50;
zcf->getValue("NMaxSplit", nsplit);
bool simuElectron = true;
string str;
if (zcf->getValue("Simulate electron", str) && str.compare("yes") != 0) simuElectron = false;
//search and config the GPU part ->> get gc
GZEUS zeus;
vector<GPUConfig>& gc = zeus.gc;//makes the name shorter
ConfigFile *gcf = cf.getBlock("GPU");
int main_id = zeus.getGPUConfig(gcf);
//initialize GZEUS and SourceHead by configurations
ConfigFile *scf = cf.getBlock("SOURCEHEAD");
SourceHead_Init(scf);
#ifdef SOURCE_STATIStICS
//for the source energy statistics
PRNG _rng;
_rng.init(1234);
Particle pars[100];
int NES = int(1e6);
double* Ens = new double[NES];
int nsam = 0;
while (nsam < NES)
{
int np = SourceHead_Sample(&_rng, pars);
for (int i = 0; i < np; ++i)
{
Ens[nsam] = pars[i].E;
++nsam;
if (nsam >= NES) break;
}
}
FILE* fps = fopen("E.txt", "wb");
fwrite(Ens, sizeof(double), NES, fps); //write all NES sampled energies
fclose(fps);
delete[] Ens;
#endif
//string dataDir;
//scf->getValue("DataDir",dataDir);
//if (!ZeusData_load(dataDir.c_str())) exitApp("Cannot load Zeus cross-sections correctly!");
zeus.init(&cf); //prepare GPU data and phantom
fNSIMU -= zeus._phant->Hist; //subtract previous histories
if (fNSIMU <= 0)
{
Log("Don't need to run any more history! Skip this task...\n\n");
return;
}
//config the source particle pool
int NGT = 1, NGStack = 400;
scf->getValue("NThread", NGT);
scf->getValue("Sample Stack Depth", NGStack);
int NOneFetch = gc[0].getNOneBatch();
SourcePool sp(&gc, NGT, NOneFetch, zeus._phant, NGStack);
Log("\nCalculating dose, please wait patiently...\n\n");
cf.getValue("log short mode", logDescription);
if (logDescription.compare("yes") == 0) Log.shortMode(true);
RunTimeCounter rc; //count the calculating time
//note history number != particle number in the source pool
const int NGPU = (int)gc.size();
//history number generated once by the source pool, only modified by one thread,
//but accessed by multiple threads. It can be read only after the generating thread finished.
volatile int histNew = 0; //histNew isn't always the same as histAdd because histNew is modified in an isolated thread
volatile int histAdd = 0; //this variable is shared by all threads, so add the key word "volatile" for safe
const int firstSeed = zeus._phant->seedBegin(NGPU * gc[0].NBlock * gc[0].BlockSize); // note it's different from gPENELOPE
std::thread sthread; //source generating thread, unattached
RunTimeCounter kernelCounter;
RunTimeCounter copyCounter;
sthread = std::thread(&getSource, &sp, &histNew);
vector<SFloat*> dose(NGPU);//to store dose from all GPU cards
vector<SFloat*> uncertainty(NGPU);
int NVoxel = zeus._phant->getVoxelNum();
int nBatch = 0; //count the batch number that has been done
const int NBlock = gc[0].NBlock;
const int BlockSize = gc[0].BlockSize;
// module of auto reuse source particles
volatile int Source_Reuse_Times = gc[0].SourceReuseTimes; //maybe modified by one thread, so use volatile for safe
bool autoSourceReuse = false; //used for auto reuse
if (Source_Reuse_Times < 1)
{
autoSourceReuse = true;
Source_Reuse_Times = 1;
}
bool b_targetErrReached = false;
//each thread takes care of one GPU
#ifdef USE_OPENMP
#pragma omp parallel num_threads(NGPU)
#endif
{
int it = omp_get_thread_num();
cudaErrorCheck(cudaSetDevice(gc[it].id)); //set which GPU this thread will operate on
#ifdef USE_SINGLE_PRECISION
WaterQS aWaterQS((float*)h_WaterQSData, NQSURFACE_Q, NQSURFACE_E);
#endif
cudaErrorCheck(cudaDeviceSetCacheConfig(cudaFuncCachePreferL1));
double fHMax = fNSIMU / NGPU;
double hist = 0;
//std::mt19937 mtrng((unsigned)std::chrono::system_clock::now().time_since_epoch().count());
//generate a random thread for each working thread
//int seed = int((it + 1) * 12345789 * (double(mtrng()) / mtrng.max() + 0.5)); //???
SFloat* gdose = new SFloat[NVoxel]; //to fetch temporary dose from GPU
memset(gdose, 0, sizeof(SFloat)*NVoxel);
// resize memory for CPU end storage
dose[it] = new SFloat[NVoxel]; //to store the final dose of this thread
uncertainty[it] = new SFloat[NVoxel]; //to store the uncertainty
// initialize the dose score
for (int i = 0; i < NVoxel; ++i)
{
dose[it][i] = zeus._phant->dose[i] / NGPU;
uncertainty[it][i] = zeus._phant->uncertainty[i] / NGPU;
}
int seed = firstSeed + it*NBlock*BlockSize; //make sure all seeds are unique; note it's different from gPENELOPE
initThreads << <NBlock, BlockSize >> >(seed);
cudaKernelCheck(0);
int source_reuse = Source_Reuse_Times; //count how many times has been reused; force to initially generate incident particles
ParticleR* pInit = NULL;
while(true) //calculating main loop, end when hist >= fHMax
{
//need to regenerate initial particles
if (source_reuse >= Source_Reuse_Times)
{
if (it == main_id) //it's the main thread
{
//wait until the source is ready
sthread.join();
histAdd = histNew; //means the main GPU has histAdd new histories
}
#pragma omp barrier //wait until all GPU threads arrive here
pInit = sp.getAP(it); //update the particle array pointer
source_reuse = 0; //reset the reuse counter
#pragma omp barrier //wait until all GPUs received the data
//if (it == main_id) sthread = std::thread(&getSource, &sp, &histNew);
if (it == main_id) sthread = std::thread(&getSource, &sp, &histNew); //start a fetch prepare
}
++source_reuse; //count how many times the source has been used
hist += histAdd; // histAdd more histories will be simulated
/****************** Begin a batch run on GPU ********************/
if (it == main_id) kernelCounter.start(); //only count the kernel time for the main GPU
int sharedSize = 0; // sizeof(float)*BlockSize * 16;
if (simuElectron)
{
if (nsplit > 1) gZeusSmartRun << <NBlock, BlockSize, sharedSize, gc[it].kernelstream >> >(pInit);
else gZeusRun << <NBlock, BlockSize, sharedSize, gc[it].kernelstream >> >(pInit);
}
else
{
if (nsplit>1) gZeusSmartComptonRun << <NBlock, BlockSize, sharedSize, gc[it].kernelstream >> >(pInit);
else gZeusComptonRun << <NBlock, BlockSize, sharedSize, gc[it].kernelstream >> >(pInit);
}
cudaStreamSynchronize(gc[it].kernelstream);//wait for the kernel to finish
//print the speed information
if (it == main_id)
{
Log("time cost to execute kernels = %f s", kernelCounter.stop());
if (targetErr <= 0)
{
double time = rc.stop(true);
double speed = hist / time;
double rest = 0;
if (fHMax > hist) rest = (fHMax - hist) / speed;
else rest = 0;
Log("GPU processed ------------------------ %3.1f%%, speed = %d h/s\n", hist*100.0 / fHMax, int(speed));
Log("Time escaped = %.1f min, left time expected = %.1f min", time / 60.0, rest / 60.0);
if (progressCB) progressCB(hist*100.0 / fHMax, speed, time, rest, 0);
}
++nBatch;
if (nBatch == 10 && autoSourceReuse) // may change Source_Reuse_Times based on the performance of the first 10 batches
{
int rt = (int)round(sourceCounter.getStoredTime() / kernelCounter.getStoredTime());
if (rt > 1) Source_Reuse_Times = rt;
}
}
/****************** End a batch run on GPU *********************/
//after one batch, we need to fetch dose from GPU to calculate the uncertainty
cudaErrorCheck(cudaMemcpy(gdose, gc[it].d_doseScore, sizeof(SFloat)*NVoxel, cudaMemcpyDeviceToHost)); //fetch the batch dose from GPU
SFloat minv = gdose[0];
SFloat maxv = minv;
for (int i = 0; i < NVoxel; ++i)
{
minv = min(minv, gdose[i]);
maxv = max(maxv, gdose[i]);
dose[it][i] += gdose[i];
uncertainty[it][i] += gdose[i] * gdose[i];
gdose[i] = 0;
}
if (it == main_id) Log("max dose = %g, min dose = %g", maxv, minv);
cudaErrorCheck(cudaMemcpy(gc[it].d_doseScore, gdose, sizeof(SFloat)*NVoxel, cudaMemcpyHostToDevice)); //reset the dose counter in GPU
if (it == main_id && b_peek)
{
SFloat* d = new SFloat[NVoxel];
SFloat* u = new SFloat[NVoxel];
//add up dose and uncertainty in different GPU card
for (int j = 0; j < NVoxel; ++j)
{
d[j] = u[j] = 0;
for (int i = 0; i < NGPU; ++i)
{
d[j] += dose[i][j];
u[j] += uncertainty[i][j];
}
}
//Log("\nOutputing the intermediate dose:");
Phantom tempPhant(*zeus._phant);
double norm = 1.0889e15 * SourceHead_BeamOnTime();
tempPhant.addDose(d, u, NGPU * nBatch, NGPU * hist, norm, thresholdRegion);
delete[] d;
delete[] u;
tempPhant.getBinaryFile(gBF);
b_peek = false;
if (peekDoseCB) peekDoseCB(gBF);
}
if (targetErr > 0) //calculate the uncertainty
{
if (it == main_id)
{
double err2 = zeus._phant->peekUncertainty(dose[it], uncertainty[it], nBatch + zeus._phant->nBatch / NGPU, thresholdRegion);
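//the relative variance falls as 1/N, so the histories still needed are estimated by
//scaling the current count with err2/targetErr2; the NGPU factor accounts for
//combining the independent dose tallies of all GPUs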
double histEstimate = hist*err2 / (targetErr2*NGPU);
double time = rc.stop(true);
double speed = hist / time;
double rest = (histEstimate - hist) / speed;
Log("GPU processed ------------------------ %3.1f%%, speed = %d h/s\n", hist*100.0 / histEstimate, int(speed));
Log("Time escaped = %.1f min, left time expected = %.1f min", time / 60.0, rest / 60.0);
if (err2 < targetErr2*NGPU) b_targetErrReached = true;
if (progressCB) progressCB(hist*100.0 / histEstimate, speed, time, rest, sqrt(err2 / NGPU)*100.0);
}
#pragma omp barrier //make sure all work threads got the break signal
if (b_targetErrReached) break; //all work threads will break the while loop
}
if ((targetErr <= 0 && hist >= fHMax) || b_abort) break;
}
//finish in this GPU thread
gc[it].hist = hist;
if (!b_abort && it == main_id) //if it's aborted, print nothing
{
Log("GPU processed ------------------------ 100%%, speed = %d h/s\n", int(hist / rc.stop(true)));
Log.shortMode(false);
Log("\nWait all GPUs to finish their job...\n");
}
delete[] gdose;
} //end openMP
Log("All GPUs have finished their simulation job! Collecting dose...\n\n");
double totHist = NGPU*gc[0].hist;
SFloat* d = new SFloat[NVoxel];
SFloat* u = new SFloat[NVoxel];
//add up dose and uncertainty in different GPU card
for (int j = 0; j < NVoxel; ++j)
{
d[j] = u[j] = 0;
for (int i = 0; i < NGPU; ++i)
{
d[j] += dose[i][j];
u[j] += uncertainty[i][j];
}
}
for (int i = 0; i < NGPU; ++i)
{
delete[] dose[i];
delete[] uncertainty[i];
}
Log.shortMode(false);
double norm = 1.0889e15 * SourceHead_BeamOnTime();
zeus._phant->addDose(d, u, NGPU * nBatch, totHist, norm, thresholdRegion);
zeus._phant->getBinaryFile(gBF);
delete[] d;
delete[] u;
if (b_abort) outname += "_abort";// make sure it wouldn't overwrite the last file
outname += ".dose";
string vrFormat;
cf.getValue("ViewRay format", vrFormat);
if (vrFormat.compare("yes") == 0) zeus._phant->output(outname.c_str(), 1);
else zeus._phant->output(outname.c_str());
Log("Wait for the source generating thread to finish...");
sthread.join();
SourceHead_Delete(); //release the source head resource safely
Log("Source reuse times = %d", Source_Reuse_Times);
Log("\nTime statistics for main GPU:");
//Log("Total copy time =%.2f s", copyCounter.getStoredTime());
Log("Total kernel time = %.2f s", kernelCounter.getStoredTime());
Log("Total SourceHead time = %.2f s\n\n", sourceCounter.getStoredTime());
Log("Mixed running time = %.2f minutes, total history number = %g", rc.stop(true) / 60.0, totHist);
Log("The overall simulating speed = %d hist/sec\n\n", int(totHist / rc.stop(true)));
Log("End log time = %s\n\n", Log.timeNow());
Log("/##############################################################################/\n\n");
Log.closeFile();
if (jobFinishedCB) jobFinishedCB(b_abort, gBF);
if (b_abort)
{
b_abort = false;
if (jobFinishedCB == NULL) exit(0); //in command line execution mode, exit the program directly
}
b_thread_active = false;
} //end executeJob
|
d3bf26bb18236012fcec716ae1ca5c2151e62902.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void bhsm_backward2(
const float *wxy,
const float *x,
const float *w,
const int *ts,
const int *paths,
const float *codes,
const int *begins,
const int *lens,
const float *gLoss,
const int n_in,
const int max_len,
const int n_ex,
float *gx,
float *gW
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n_ex * max_len) {
int idx = i / max_len;
int offset = i - idx * max_len;
int t = ts[idx];
int begin = begins[t];
int length = lens[t];
if (offset < length) {
int p = begin + offset;
int node = paths[p];
if (y < n_in) {
float g = -gLoss[0] * codes[p] / (1.0f + exp(wxy[i]));
int w_i = (n_in * node) + y;
int x_i = (n_in * idx) + y;
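// atomicAdd is required because different (sample, tree-node) pairs can update the
// same row of gW (a node shared between paths) and of gx (several nodes per sample)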
atomicAdd(gx + x_i, g * w[w_i]);
atomicAdd(gW + w_i, g * x[x_i]);
}
}
}
}
|
d3bf26bb18236012fcec716ae1ca5c2151e62902.cu
|
extern "C"
__global__ void bhsm_backward2(
const float *wxy,
const float *x,
const float *w,
const int *ts,
const int *paths,
const float *codes,
const int *begins,
const int *lens,
const float *gLoss,
const int n_in,
const int max_len,
const int n_ex,
float *gx,
float *gW
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
if (i < n_ex * max_len) {
int idx = i / max_len;
int offset = i - idx * max_len;
int t = ts[idx];
int begin = begins[t];
int length = lens[t];
if (offset < length) {
int p = begin + offset;
int node = paths[p];
if (y < n_in) {
float g = -gLoss[0] * codes[p] / (1.0f + exp(wxy[i]));
int w_i = (n_in * node) + y;
int x_i = (n_in * idx) + y;
atomicAdd(gx + x_i, g * w[w_i]);
atomicAdd(gW + w_i, g * x[x_i]);
}
}
}
}
|
bfc33aff1ce2d132179caa2bfe55f469f96eef92.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 OrthogonalHawk
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*****************************************************************************/
/******************************************************************************
*
* @file falcon_dsp_fir_filter_cuda.cu
* @author OrthogonalHawk
* @date 24-Jan-2020
*
* @brief Implements a CUDA-based FIR filtering operation.
*
* @section DESCRIPTION
*
* Implements the CUDA version of FIR filtering operations. Both a standalone
* function and a class-based transform object are supported.
*
* @section HISTORY
*
* 24-Jan-2020 OrthogonalHawk File created.
*
*****************************************************************************/
/******************************************************************************
* INCLUDE_FILES
*****************************************************************************/
#include <iostream>
#include <memory>
#include <stdint.h>
#include "transform/falcon_dsp_fir_filter_cuda.h"
#include "utilities/falcon_dsp_cuda_utils.h"
#include "utilities/falcon_dsp_host_timer.h"
/******************************************************************************
* CONSTANTS
*****************************************************************************/
const bool TIMING_LOGS_ENABLED = false;
const uint32_t MAX_NUM_CUDA_THREADS = 1024;
/******************************************************************************
* ENUMS & TYPEDEFS
*****************************************************************************/
/******************************************************************************
* MACROS
*****************************************************************************/
namespace falcon_dsp
{
/******************************************************************************
* FUNCTION IMPLEMENTATION
*****************************************************************************/
/* @brief CUDA implementation of a linear FIR filter vector operation.
* @param[in] filter_coeffs - FIR filter coefficients
* @param[in] in - input vector
* @param[out] out - filtered vector
* @return True if the input vector was filtered as requested;
* false otherwise.
*/
bool fir_filter_cuda(std::vector<std::complex<float>> &coeffs, std::vector<std::complex<int16_t>>& in,
std::vector<std::complex<int16_t>>& out)
{
falcon_dsp_fir_filter_cuda filter_obj(coeffs);
return filter_obj.apply(in, out);
}
bool fir_filter_cuda(std::vector<std::complex<float>> &coeffs, std::vector<std::complex<float>>& in,
std::vector<std::complex<float>>& out)
{
falcon_dsp_fir_filter_cuda filter_obj(coeffs);
return filter_obj.apply(in, out);
}
/* CUDA kernel function that applies an FIR filter. this kernel assumes that the caller
* pads the input with either zeroes or previous input (state) information. */
__global__
void __fir_filter(cuFloatComplex * coeffs,
uint32_t coeff_len,
uint32_t num_output_samples_to_process_per_thread,
cuFloatComplex * in_data,
uint32_t in_data_len,
cuFloatComplex * out_data,
uint32_t out_data_len)
{
/* retrieve the starting data index that corresponds to this thread. given the
* simplified FIR filter equation:
*
* y(n) = x(n)h(0) + x(n-1)h(1) + x(n-2)h(2) + ... x(n-m)h(m)
*
* ^^^ y: output
* x: input
* h: FIR filter coefficients; length 'm'
*
* the start_data_index points to the x(n-m) data value, which is the 'oldest' sample
* that is required to compute y(n).
*/
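/* for example, with coeff_len = 4 the caller pads the input with 3 leading
 * state/zero samples, so the thread producing y(n) reads in_data[n .. n+3]
 * while walking the coefficients from h(3) down to h(0) */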
uint32_t start_data_index = (blockIdx.x * blockDim.x * num_output_samples_to_process_per_thread) +
(threadIdx.x * num_output_samples_to_process_per_thread);
/* the same calculation is equivalent to finding the output offset, or the 'n'
* used to index the output array */
uint32_t output_data_offset = start_data_index;
uint32_t num_padding_samples = coeff_len - 1;
/* catch the case where the output buffer + padding is shorter than
* the input buffer */
if ((out_data_len + num_padding_samples) < in_data_len)
{
return;
}
/* catch the case where the input size is not an integer
* multiple of the thread block size */
if ((start_data_index + num_padding_samples) > in_data_len ||
start_data_index > out_data_len)
{
return;
}
/* the previous checks captured cases where the current thread should do nothing.
* now catch the case where this kernel cannot process the full number of samples
* when the end of the input buffer is reached. note that due to the previous
* check to make sure that the output buffer is at least as long as the input
* buffer it is sufficient to only check the input buffer here */
uint32_t local_num_samples_to_process = num_output_samples_to_process_per_thread;
if ((start_data_index + num_padding_samples + num_output_samples_to_process_per_thread) > in_data_len)
{
local_num_samples_to_process = in_data_len - start_data_index - num_padding_samples;
}
cuFloatComplex *data_ptr = nullptr;
cuFloatComplex *coeff_ptr = nullptr;
cuFloatComplex accum;
/* compute the output values */
for (uint32_t out_sample_idx = 0;
out_sample_idx < local_num_samples_to_process &&
(start_data_index + num_padding_samples + out_sample_idx) < in_data_len &&
(output_data_offset + out_sample_idx) < out_data_len;
++out_sample_idx)
{
/* reset for each new output. data_ptr is pointed to the oldest data that
* is needed for the y(n) output. coeff_ptr is therefore pointed to the
* end of the coefficient array */
data_ptr = &in_data[start_data_index + out_sample_idx];
coeff_ptr = &coeffs[coeff_len - 1]; /* last coefficient in the array */
accum.x = 0;
accum.y = 0;
/* go through all coefficients */
for (uint32_t ii = 0; ii < coeff_len; ++ii)
{
accum = cuCaddf(accum, cuCmulf(*data_ptr++, *coeff_ptr--));
}
/* output computed; save to the output buffer */
out_data[output_data_offset + out_sample_idx] = accum;
}
}
/******************************************************************************
* CLASS IMPLEMENTATION
*****************************************************************************/
falcon_dsp_fir_filter_cuda::falcon_dsp_fir_filter_cuda(std::vector<std::complex<float>> &coeffs)
: falcon_dsp_fir_filter(coeffs),
m_cuda_coeff_data(nullptr),
m_cuda_input_data(nullptr),
m_max_num_input_samples(0),
m_cuda_output_data(nullptr),
m_max_num_output_samples(0)
{
/* allocate CUDA memory for the coefficient information; since these are set
* when the class is constructed and cannot be changed the amount of data
* is known now */
cudaErrChkAssert(hipMallocManaged(&m_cuda_coeff_data,
m_coefficients.size() * sizeof(cuFloatComplex)));
/* copy the coefficient information to the GPU */
cudaErrChkAssert(hipMemcpy(static_cast<void *>(m_cuda_coeff_data),
static_cast<void *>(m_coefficients.data()),
m_coefficients.size() * sizeof(std::complex<float>),
hipMemcpyHostToDevice));
m_input_padding_in_samples = m_coefficients.size() - 1;
/* change the shared memory size to 8 bytes per shared memory bank. this is so that we
* can better handle complex<float> data, which is natively 8 bytes in size */
cudaErrChkAssert(hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte));
}
falcon_dsp_fir_filter_cuda::~falcon_dsp_fir_filter_cuda(void)
{
std::lock_guard<std::mutex> lock(std::mutex);
/* cleanup the CUDA memory that was reserved in the constructor to house
* the coefficient information */
if (m_cuda_coeff_data)
{
cudaErrChk(hipFree(m_cuda_coeff_data));
}
}
bool falcon_dsp_fir_filter_cuda::apply(std::vector<std::complex<int16_t>>& in, std::vector<std::complex<int16_t>>& out)
{
/* create another copy of the data and cast to std::complex<float> */
std::vector<std::complex<float>> tmp_in_vec;
tmp_in_vec.reserve(in.size());
for (auto in_iter = in.begin(); in_iter != in.end(); ++in_iter)
{
tmp_in_vec.push_back(std::complex<float>((*in_iter).real(), (*in_iter).imag()));
}
/* filter the input data */
std::vector<std::complex<float>> tmp_out_vec;
bool ret = apply(tmp_in_vec, tmp_out_vec);
/* cast the filtered output back to std::complex<int16_t> */
for (auto out_iter = tmp_out_vec.begin(); out_iter != tmp_out_vec.end(); ++out_iter)
{
out.push_back(std::complex<int16_t>((*out_iter).real(), (*out_iter).imag()));
}
return ret;
}
bool falcon_dsp_fir_filter_cuda::apply(std::vector<std::complex<float>>& in, std::vector<std::complex<float>>& out)
{
std::lock_guard<std::mutex> lock(std::mutex);
/* sanity check object state */
if (m_coefficients.size() == 0)
{
return false;
}
/* clear the output data structures and resize so that they can hold the filtered
* data. note that by using resize() the vector size is now equal to the requested
* size even without explicitly adding data to the vector, which means that we can
* add data directly into the vector data buffer without worrying about the
* vector size getting mismatched with the buffer contents.
*
* note that the output size will always be equal to the input size since the input
* data is either padded with zeroes or previous state information. */
out.clear();
out.resize(in.size());
/* if there is enough space in previously allocated memory then use it; otherwise
* allocate new memory buffers. it is left as a future feature to specify a maximum
* memory size and process the data in chunks instead of requiring enough GPU
* memory to process the whole vector at once */
/* allocate CUDA memory for the input samples */
if (m_max_num_input_samples < (in.size() + m_input_padding_in_samples))
{
if (m_cuda_input_data)
{
cudaErrChkAssert(hipFree(m_cuda_input_data));
m_cuda_input_data = nullptr;
m_max_num_input_samples = 0;
}
cudaErrChkAssert(hipMallocManaged(&m_cuda_input_data,
(in.size() + m_input_padding_in_samples) * sizeof(std::complex<float>)));
m_max_num_input_samples = in.size() + m_input_padding_in_samples;
}
/* allocate CUDA memory for the output samples */
if (m_max_num_output_samples < in.size())
{
/* clean up existing memory */
if (m_cuda_output_data)
{
cudaErrChkAssert(hipFree(m_cuda_output_data));
m_cuda_output_data = nullptr;
m_max_num_output_samples = 0;
}
/* allocate CUDA unified memory space for the output data */
cudaErrChkAssert(hipMallocManaged(&m_cuda_output_data,
in.size() * sizeof(std::complex<float>)));
m_max_num_output_samples = in.size();
}
/* prepare the padding/state information. state information is stored such that the last
* element in the m_state container should immediately precede the input data. */
std::vector<std::complex<float>> prev_data(m_input_padding_in_samples, std::complex<float>(0.0, 0.0));
auto prev_data_iter = prev_data.rbegin();
auto state_iter = m_state.rbegin();
for (uint32_t ii = 0;
ii < m_input_padding_in_samples &&
prev_data_iter != prev_data.rend() &&
state_iter != m_state.rend();
++ii)
{
*(prev_data_iter++) = *(state_iter++);
}
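/* prev_data was filled from the back: its last entries hold the most recent
 * previous input samples (zero-padded at the front when little state exists),
 * so m_cuda_input_data[0 .. pad-1] will immediately precede in[0] */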
/* copy the padding/state information to the GPU */
cudaErrChkAssert(hipMemcpy(static_cast<void *>(m_cuda_input_data),
static_cast<void *>(prev_data.data()),
prev_data.size() * sizeof(std::complex<float>),
hipMemcpyHostToDevice));
/* copy the input data to the GPU; note the offset by prev_data.size() to
* provide room for the padding/state samples */
cudaErrChkAssert(hipMemcpy(static_cast<void *>(m_cuda_input_data + prev_data.size()),
static_cast<void *>(in.data()),
in.size() * sizeof(std::complex<float>),
hipMemcpyHostToDevice));
/* run kernel on the GPU */
uint32_t num_samples_per_thread = 1;
uint32_t samples_per_thread_block = num_samples_per_thread * MAX_NUM_CUDA_THREADS;
uint32_t num_thread_blocks = (in.size() + samples_per_thread_block - 1) / samples_per_thread_block;
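/* e.g. with num_samples_per_thread = 1 and 4096 input samples this yields
 * ceil(4096 / 1024) = 4 thread blocks of MAX_NUM_CUDA_THREADS threads,
 * one output sample per thread */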
falcon_dsp::falcon_dsp_host_timer timer("KERNEL", TIMING_LOGS_ENABLED);
hipLaunchKernelGGL(( __fir_filter), dim3(num_thread_blocks), dim3(MAX_NUM_CUDA_THREADS), 0, 0, m_cuda_coeff_data,
m_coefficients.size(),
num_samples_per_thread,
m_cuda_input_data,
m_max_num_input_samples,
m_cuda_output_data,
m_max_num_output_samples);
cudaErrChkAssert(hipPeekAtLastError());
/* wait for GPU to finish before accessing on host */
cudaErrChkAssert(hipDeviceSynchronize());
timer.log_duration("Single Chan Kernel Complete");
/* copy output samples out of CUDA memory */
cudaErrChkAssert(hipMemcpy(static_cast<void *>(out.data()),
static_cast<void *>(m_cuda_output_data),
in.size() * sizeof(std::complex<float>),
hipMemcpyDeviceToHost));
/* finished handling the current data; now update the state array */
_update_state(in);
return out.size() > 0;
}
}
|
bfc33aff1ce2d132179caa2bfe55f469f96eef92.cu
|
/******************************************************************************
*
* MIT License
*
* Copyright (c) 2020 OrthogonalHawk
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
*****************************************************************************/
/******************************************************************************
*
* @file falcon_dsp_fir_filter_cuda.cu
* @author OrthogonalHawk
* @date 24-Jan-2020
*
* @brief Implements a CUDA-based FIR filtering operation.
*
* @section DESCRIPTION
*
* Implements the CUDA version of FIR filtering operations. Both a standalone
* function and a class-based transform object are supported.
*
* @section HISTORY
*
* 24-Jan-2020 OrthogonalHawk File created.
*
*****************************************************************************/
/******************************************************************************
* INCLUDE_FILES
*****************************************************************************/
#include <iostream>
#include <memory>
#include <stdint.h>
#include "transform/falcon_dsp_fir_filter_cuda.h"
#include "utilities/falcon_dsp_cuda_utils.h"
#include "utilities/falcon_dsp_host_timer.h"
/******************************************************************************
* CONSTANTS
*****************************************************************************/
const bool TIMING_LOGS_ENABLED = false;
const uint32_t MAX_NUM_CUDA_THREADS = 1024;
/******************************************************************************
* ENUMS & TYPEDEFS
*****************************************************************************/
/******************************************************************************
* MACROS
*****************************************************************************/
namespace falcon_dsp
{
/******************************************************************************
* FUNCTION IMPLEMENTATION
*****************************************************************************/
/* @brief CUDA implementation of a linear FIR filter vector operation.
* @param[in] filter_coeffs - FIR filter coefficients
* @param[in] in - input vector
* @param[out] out - filtered vector
* @return True if the input vector was filtered as requested;
* false otherwise.
*/
bool fir_filter_cuda(std::vector<std::complex<float>> &coeffs, std::vector<std::complex<int16_t>>& in,
std::vector<std::complex<int16_t>>& out)
{
falcon_dsp_fir_filter_cuda filter_obj(coeffs);
return filter_obj.apply(in, out);
}
bool fir_filter_cuda(std::vector<std::complex<float>> &coeffs, std::vector<std::complex<float>>& in,
std::vector<std::complex<float>>& out)
{
falcon_dsp_fir_filter_cuda filter_obj(coeffs);
return filter_obj.apply(in, out);
}
/* CUDA kernel function that applies an FIR filter. this kernel assumes that the caller
* pads the input with either zeroes or previous input (state) information. */
__global__
void __fir_filter(cuFloatComplex * coeffs,
uint32_t coeff_len,
uint32_t num_output_samples_to_process_per_thread,
cuFloatComplex * in_data,
uint32_t in_data_len,
cuFloatComplex * out_data,
uint32_t out_data_len)
{
/* retrieve the starting data index that corresponds to this thread. given the
* simplified FIR filter equation:
*
* y(n) = x(n)h(0) + x(n-1)h(1) + x(n-2)h(2) + ... x(n-m)h(m)
*
* ^^^ y: output
* x: input
* h: FIR filter coefficients; length 'm'
*
* the start_data_index points to the x(n-m) data value, which is the 'oldest' sample
* that is required to compute y(n).
*/
uint32_t start_data_index = (blockIdx.x * blockDim.x * num_output_samples_to_process_per_thread) +
(threadIdx.x * num_output_samples_to_process_per_thread);
        /* the same index also serves as the output offset, i.e. the 'n'
         * used to index the output array */
uint32_t output_data_offset = start_data_index;
uint32_t num_padding_samples = coeff_len - 1;
/* catch the case where the output buffer + padding is shorter than
* the input buffer */
if ((out_data_len + num_padding_samples) < in_data_len)
{
return;
}
/* catch the case where the input size is not an integer
* multiple of the thread block size */
if ((start_data_index + num_padding_samples) > in_data_len ||
start_data_index > out_data_len)
{
return;
}
/* the previous checks captured cases where the current thread should do nothing.
* now catch the case where this kernel cannot process the full number of samples
* when the end of the input buffer is reached. note that due to the previous
* check to make sure that the output buffer is at least as long as the input
* buffer it is sufficient to only check the input buffer here */
uint32_t local_num_samples_to_process = num_output_samples_to_process_per_thread;
if ((start_data_index + num_padding_samples + num_output_samples_to_process_per_thread) > in_data_len)
{
local_num_samples_to_process = in_data_len - start_data_index - num_padding_samples;
}
cuFloatComplex *data_ptr = nullptr;
cuFloatComplex *coeff_ptr = nullptr;
cuFloatComplex accum;
/* compute the output values */
for (uint32_t out_sample_idx = 0;
out_sample_idx < local_num_samples_to_process &&
(start_data_index + num_padding_samples + out_sample_idx) < in_data_len &&
(output_data_offset + out_sample_idx) < out_data_len;
++out_sample_idx)
{
            /* reset for each new output. data_ptr points to the oldest sample that
             * is needed for the y(n) output; coeff_ptr therefore points to the
             * end of the coefficient array */
data_ptr = &in_data[start_data_index + out_sample_idx];
coeff_ptr = &coeffs[coeff_len - 1]; /* last coefficient in the array */
accum.x = 0;
accum.y = 0;
/* go through all coefficients */
for (uint32_t ii = 0; ii < coeff_len; ++ii)
{
accum = cuCaddf(accum, cuCmulf(*data_ptr++, *coeff_ptr--));
}
/* output computed; save to the output buffer */
out_data[output_data_offset + out_sample_idx] = accum;
}
}
/******************************************************************************
* CLASS IMPLEMENTATION
*****************************************************************************/
falcon_dsp_fir_filter_cuda::falcon_dsp_fir_filter_cuda(std::vector<std::complex<float>> &coeffs)
: falcon_dsp_fir_filter(coeffs),
m_cuda_coeff_data(nullptr),
m_cuda_input_data(nullptr),
m_max_num_input_samples(0),
m_cuda_output_data(nullptr),
m_max_num_output_samples(0)
{
/* allocate CUDA memory for the coefficient information; since these are set
* when the class is constructed and cannot be changed the amount of data
* is known now */
cudaErrChkAssert(cudaMallocManaged(&m_cuda_coeff_data,
m_coefficients.size() * sizeof(cuFloatComplex)));
/* copy the coefficient information to the GPU */
cudaErrChkAssert(cudaMemcpy(static_cast<void *>(m_cuda_coeff_data),
static_cast<void *>(m_coefficients.data()),
m_coefficients.size() * sizeof(std::complex<float>),
cudaMemcpyHostToDevice));
m_input_padding_in_samples = m_coefficients.size() - 1;
/* change the shared memory size to 8 bytes per shared memory bank. this is so that we
* can better handle complex<float> data, which is natively 8 bytes in size */
cudaErrChkAssert(cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte));
}
falcon_dsp_fir_filter_cuda::~falcon_dsp_fir_filter_cuda(void)
{
std::lock_guard<std::mutex> lock(std::mutex);
/* cleanup the CUDA memory that was reserved in the constructor to house
* the coefficient information */
if (m_cuda_coeff_data)
{
cudaErrChk(cudaFree(m_cuda_coeff_data));
}
}
bool falcon_dsp_fir_filter_cuda::apply(std::vector<std::complex<int16_t>>& in, std::vector<std::complex<int16_t>>& out)
{
/* create another copy of the data and cast to std::complex<float> */
std::vector<std::complex<float>> tmp_in_vec;
tmp_in_vec.reserve(in.size());
for (auto in_iter = in.begin(); in_iter != in.end(); ++in_iter)
{
tmp_in_vec.push_back(std::complex<float>((*in_iter).real(), (*in_iter).imag()));
}
/* filter the input data */
std::vector<std::complex<float>> tmp_out_vec;
bool ret = apply(tmp_in_vec, tmp_out_vec);
/* cast the filtered output back to std::complex<int16_t> */
for (auto out_iter = tmp_out_vec.begin(); out_iter != tmp_out_vec.end(); ++out_iter)
{
out.push_back(std::complex<int16_t>((*out_iter).real(), (*out_iter).imag()));
}
return ret;
}
bool falcon_dsp_fir_filter_cuda::apply(std::vector<std::complex<float>>& in, std::vector<std::complex<float>>& out)
{
std::lock_guard<std::mutex> lock(std::mutex);
/* sanity check object state */
if (m_coefficients.size() == 0)
{
return false;
}
/* clear the output data structures and resize so that they can hold the filtered
* data. note that by using resize() the vector size is now equal to the requested
* size even without explicitly adding data to the vector, which means that we can
* add data directly into the vector data buffer without worrying about the
* vector size getting mismatched with the buffer contents.
*
* note that the output size will always be equal to the input size since the input
* data is either padded with zeroes or previous state information. */
out.clear();
out.resize(in.size());
/* if there is enough space in previously allocated memory then use it; otherwise
* allocate new memory buffers. it is left as a future feature to specify a maximum
* memory size and process the data in chunks instead of requiring enough GPU
* memory to process the whole vector at once */
/* allocate CUDA memory for the input samples */
if (m_max_num_input_samples < (in.size() + m_input_padding_in_samples))
{
if (m_cuda_input_data)
{
cudaErrChkAssert(cudaFree(m_cuda_input_data));
m_cuda_input_data = nullptr;
m_max_num_input_samples = 0;
}
cudaErrChkAssert(cudaMallocManaged(&m_cuda_input_data,
(in.size() + m_input_padding_in_samples) * sizeof(std::complex<float>)));
m_max_num_input_samples = in.size() + m_input_padding_in_samples;
}
/* allocate CUDA memory for the output samples */
if (m_max_num_output_samples < in.size())
{
/* clean up existing memory */
if (m_cuda_output_data)
{
cudaErrChkAssert(cudaFree(m_cuda_output_data));
m_cuda_output_data = nullptr;
m_max_num_output_samples = 0;
}
/* allocate CUDA unified memory space for the output data */
cudaErrChkAssert(cudaMallocManaged(&m_cuda_output_data,
in.size() * sizeof(std::complex<float>)));
m_max_num_output_samples = in.size();
}
/* prepare the padding/state information. state information is stored such that the last
* element in the m_state container should immediately precede the input data. */
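        /* e.g. (illustrative) with a 4-tap filter m_input_padding_in_samples == 3 and
         * prev_data holds the three most recent samples from the previous call (or
         * zeroes on the first call); assuming m_state stores past samples oldest to
         * newest, the newest lands in prev_data[2] and immediately precedes in[0]
         * once both buffers are copied to the GPU. */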
std::vector<std::complex<float>> prev_data(m_input_padding_in_samples, std::complex<float>(0.0, 0.0));
auto prev_data_iter = prev_data.rbegin();
auto state_iter = m_state.rbegin();
for (uint32_t ii = 0;
ii < m_input_padding_in_samples &&
prev_data_iter != prev_data.rend() &&
state_iter != m_state.rend();
++ii)
{
*(prev_data_iter++) = *(state_iter++);
}
/* copy the padding/state information to the GPU */
cudaErrChkAssert(cudaMemcpy(static_cast<void *>(m_cuda_input_data),
static_cast<void *>(prev_data.data()),
prev_data.size() * sizeof(std::complex<float>),
cudaMemcpyHostToDevice));
/* copy the input data to the GPU; note the offset by prev_data.size() to
* provide room for the padding/state samples */
cudaErrChkAssert(cudaMemcpy(static_cast<void *>(m_cuda_input_data + prev_data.size()),
static_cast<void *>(in.data()),
in.size() * sizeof(std::complex<float>),
cudaMemcpyHostToDevice));
/* run kernel on the GPU */
uint32_t num_samples_per_thread = 1;
uint32_t samples_per_thread_block = num_samples_per_thread * MAX_NUM_CUDA_THREADS;
uint32_t num_thread_blocks = (in.size() + samples_per_thread_block - 1) / samples_per_thread_block;
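        /* e.g. with in.size() == 4096 and MAX_NUM_CUDA_THREADS == 1024, each thread
         * block covers 1024 output samples and num_thread_blocks == 4 (illustrative
         * arithmetic only) */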
falcon_dsp::falcon_dsp_host_timer timer("KERNEL", TIMING_LOGS_ENABLED);
__fir_filter<<<num_thread_blocks, MAX_NUM_CUDA_THREADS>>>(m_cuda_coeff_data,
m_coefficients.size(),
num_samples_per_thread,
m_cuda_input_data,
m_max_num_input_samples,
m_cuda_output_data,
m_max_num_output_samples);
cudaErrChkAssert(cudaPeekAtLastError());
/* wait for GPU to finish before accessing on host */
cudaErrChkAssert(cudaDeviceSynchronize());
timer.log_duration("Single Chan Kernel Complete");
/* copy output samples out of CUDA memory */
cudaErrChkAssert(cudaMemcpy(static_cast<void *>(out.data()),
static_cast<void *>(m_cuda_output_data),
in.size() * sizeof(std::complex<float>),
cudaMemcpyDeviceToHost));
/* finished handling the current data; now update the state array */
_update_state(in);
return out.size() > 0;
}
}
|
9e9aac45921f1c2ef0dc5324b4c027f614be003b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "..\..\Common\Utils.h"
#include "GridProcs.h"
int runBoxPick();
int runIsingK();
int runThermal();
//void randomInit(int *data, int size)
//{
// for (int i = 0; i < size; ++i)
// data[i] = rand();
//}
//
//int runIsingK();
//
//int runGol();
//
//////////////////////////////////////////////////////////////////////////////////
//// Program main
//////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char **argv)
//{
// runThermal();
//}
int runThermal()
{
const int span = 16;
int gridVol = span * span;
float *h_grid_A_in;
float *h_grid_B_in;
float *host_out;
float *dev_A;
float *dev_B;
hipError_t cudaStatus;
printf("in runThermal\n");
h_grid_A_in = LeftRightGradient(span, 0, 1);
h_grid_B_in = LeftRightGradient(span, 0, 1);
printf("in array A: \n\n");
PrintFloatArray(h_grid_A_in, span, gridVol);
host_out = (float *)malloc(gridVol * sizeof(float));
cudaStatus = hipMalloc((void**)&dev_A, gridVol * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_B, gridVol * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_A, h_grid_A_in, gridVol * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_B, h_grid_B_in, gridVol * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
DllRun_k_Thermo_dg(dev_B, dev_A, span, 1, 0.1, 0, 3);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = hipMemcpy(host_out, dev_B, gridVol * sizeof(float), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("out array B: \n\n");
PrintFloatArray(host_out, span, gridVol);
Error:
hipFree(dev_A);
hipFree(dev_B);
return cudaStatus;
}
int runBoxPick()
{
const int span = 32;
const int blockSize = 4;
const int blocks_per_span = span / blockSize;
const int blockCount = blocks_per_span * blocks_per_span;
int gridVol = span * span;
unsigned int *host_rands_in;
int *host_out;
unsigned int *dev_rands;
int *dev_out;
hipError_t cudaStatus;
host_rands_in = RndInts(blockCount);
host_out = (int *)malloc(gridVol * sizeof(int));
PrintUintArray(host_rands_in, blocks_per_span, blockCount);
printf("\n\n");
cudaStatus = hipMalloc((void**)&dev_rands, blockCount * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_out, gridVol * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_rands, host_rands_in, blockCount * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
DllRun_k_RandBlockPick(dev_out, dev_rands, blockSize, blocks_per_span);
cudaStatus = hipMemcpy(host_out, dev_out, gridVol * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
PrintIntArray(host_out, span, gridVol);
printf("\n\n");
Error:
hipFree(dev_rands);
hipFree(dev_out);
return cudaStatus;
}
int runIsingK()
{
const int span = 16;
int gridVol = span * span;
int *h_grid_in;
float *h_rands_in;
float *h_thresh_in;
int *host_out;
int *dev_odd;
int *dev_even;
int *dev_energy;
float *dev_rands;
float *dev_thresh;
hipError_t cudaStatus;
printf("in runIsingK\n");
h_thresh_in = (float *)malloc(10 * sizeof(float));
h_thresh_in[1] = 1.0;
h_thresh_in[3] = 1.0;
h_thresh_in[5] = 0.5;
h_thresh_in[7] = 0.2;
h_thresh_in[9] = 0.1;
h_grid_in = Rnd_m1or1(gridVol, 0.3);
h_rands_in = RndFloat0to1(gridVol);
printf("in array: \n\n");
PrintFloatArray(h_rands_in, span, gridVol);
printf("in rands: \n\n");
PrintIntArray(h_grid_in, span, gridVol);
host_out = (int *)malloc(gridVol * sizeof(int));
cudaStatus = hipMalloc((void**)&dev_energy, gridVol * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_thresh, 10 * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_rands, gridVol * sizeof(float));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_odd, gridVol * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_even, gridVol * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_even, h_grid_in, gridVol * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_rands, h_rands_in, gridVol * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_thresh, h_thresh_in, 10 * sizeof(float), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
DllRun_k_Ising_dg(dev_odd, dev_energy, dev_even, dev_rands, span, 1, dev_thresh);
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(host_out, dev_odd, gridVol * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
printf("out grid: \n\n");
PrintIntArray(host_out, span, gridVol);
Error:
hipFree(dev_odd);
hipFree(dev_even);
return cudaStatus;
}
//
//
//int runGol()
//{
// const int span = 28;
// int gridVol = span * span;
// int *host_in;
// int *host_out;
// int *dev_odd;
// int *dev_even;
// hipError_t cudaStatus;
//
// host_in = Rnd0or1(gridVol, 0.3);
// host_out = (int *)malloc(gridVol * sizeof(int));
//
// PrintIntArray(host_in, span, gridVol);
// printf("\n\n");
//
// cudaStatus = hipMalloc((void**)&dev_odd, gridVol * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMalloc((void**)&dev_even, gridVol * sizeof(int));
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMalloc failed!");
// goto Error;
// }
//
// cudaStatus = hipMemcpy(dev_even, host_in, gridVol * sizeof(int), hipMemcpyHostToDevice);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//
// k_Gol <<<span, span>>>(dev_odd, dev_even, span);
//
// cudaStatus = hipDeviceSynchronize();
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = hipMemcpy(host_out, dev_odd, gridVol * sizeof(int), hipMemcpyDeviceToHost);
// if (cudaStatus != hipSuccess) {
// fprintf(stderr, "hipMemcpy failed!");
// goto Error;
// }
//
//
// PrintIntArray(host_out, span, gridVol);
//
//Error:
// hipFree(dev_odd);
// hipFree(dev_even);
//
// return cudaStatus;
//}
|
9e9aac45921f1c2ef0dc5324b4c027f614be003b.cu
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "..\..\Common\Utils.h"
#include "GridProcs.h"
int runBoxPick();
int runIsingK();
int runThermal();
//void randomInit(int *data, int size)
//{
// for (int i = 0; i < size; ++i)
// data[i] = rand();
//}
//
//int runIsingK();
//
//int runGol();
//
//////////////////////////////////////////////////////////////////////////////////
//// Program main
//////////////////////////////////////////////////////////////////////////////////
//int main(int argc, char **argv)
//{
// runThermal();
//}
int runThermal()
{
const int span = 16;
int gridVol = span * span;
float *h_grid_A_in;
float *h_grid_B_in;
float *host_out;
float *dev_A;
float *dev_B;
cudaError_t cudaStatus;
printf("in runThermal\n");
h_grid_A_in = LeftRightGradient(span, 0, 1);
h_grid_B_in = LeftRightGradient(span, 0, 1);
printf("in array A: \n\n");
PrintFloatArray(h_grid_A_in, span, gridVol);
host_out = (float *)malloc(gridVol * sizeof(float));
cudaStatus = cudaMalloc((void**)&dev_A, gridVol * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_B, gridVol * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_A, h_grid_A_in, gridVol * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_B, h_grid_B_in, gridVol * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
DllRun_k_Thermo_dg(dev_B, dev_A, span, 1, 0.1, 0, 3);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(host_out, dev_B, gridVol * sizeof(float), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("out array B: \n\n");
PrintFloatArray(host_out, span, gridVol);
Error:
cudaFree(dev_A);
cudaFree(dev_B);
return cudaStatus;
}
int runBoxPick()
{
const int span = 32;
const int blockSize = 4;
const int blocks_per_span = span / blockSize;
const int blockCount = blocks_per_span * blocks_per_span;
int gridVol = span * span;
unsigned int *host_rands_in;
int *host_out;
unsigned int *dev_rands;
int *dev_out;
cudaError_t cudaStatus;
host_rands_in = RndInts(blockCount);
host_out = (int *)malloc(gridVol * sizeof(int));
PrintUintArray(host_rands_in, blocks_per_span, blockCount);
printf("\n\n");
cudaStatus = cudaMalloc((void**)&dev_rands, blockCount * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_out, gridVol * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_rands, host_rands_in, blockCount * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
DllRun_k_RandBlockPick(dev_out, dev_rands, blockSize, blocks_per_span);
cudaStatus = cudaMemcpy(host_out, dev_out, gridVol * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
PrintIntArray(host_out, span, gridVol);
printf("\n\n");
Error:
cudaFree(dev_rands);
cudaFree(dev_out);
return cudaStatus;
}
int runIsingK()
{
const int span = 16;
int gridVol = span * span;
int *h_grid_in;
float *h_rands_in;
float *h_thresh_in;
int *host_out;
int *dev_odd;
int *dev_even;
int *dev_energy;
float *dev_rands;
float *dev_thresh;
cudaError_t cudaStatus;
printf("in runIsingK\n");
h_thresh_in = (float *)malloc(10 * sizeof(float));
h_thresh_in[1] = 1.0;
h_thresh_in[3] = 1.0;
h_thresh_in[5] = 0.5;
h_thresh_in[7] = 0.2;
h_thresh_in[9] = 0.1;
h_grid_in = Rnd_m1or1(gridVol, 0.3);
h_rands_in = RndFloat0to1(gridVol);
printf("in array: \n\n");
PrintFloatArray(h_rands_in, span, gridVol);
printf("in rands: \n\n");
PrintIntArray(h_grid_in, span, gridVol);
host_out = (int *)malloc(gridVol * sizeof(int));
cudaStatus = cudaMalloc((void**)&dev_energy, gridVol * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_thresh, 10 * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_rands, gridVol * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_odd, gridVol * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_even, gridVol * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_even, h_grid_in, gridVol * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_rands, h_rands_in, gridVol * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_thresh, h_thresh_in, 10 * sizeof(float), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
DllRun_k_Ising_dg(dev_odd, dev_energy, dev_even, dev_rands, span, 1, dev_thresh);
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(host_out, dev_odd, gridVol * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
printf("out grid: \n\n");
PrintIntArray(host_out, span, gridVol);
Error:
cudaFree(dev_odd);
cudaFree(dev_even);
return cudaStatus;
}
//
//
//int runGol()
//{
// const int span = 28;
// int gridVol = span * span;
// int *host_in;
// int *host_out;
// int *dev_odd;
// int *dev_even;
// cudaError_t cudaStatus;
//
// host_in = Rnd0or1(gridVol, 0.3);
// host_out = (int *)malloc(gridVol * sizeof(int));
//
// PrintIntArray(host_in, span, gridVol);
// printf("\n\n");
//
// cudaStatus = cudaMalloc((void**)&dev_odd, gridVol * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMalloc((void**)&dev_even, gridVol * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error;
// }
//
// cudaStatus = cudaMemcpy(dev_even, host_in, gridVol * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//
// k_Gol <<<span, span>>>(dev_odd, dev_even, span);
//
// cudaStatus = cudaDeviceSynchronize();
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching copyKernel!\n", cudaStatus);
// goto Error;
// }
//
// // Copy output vector from GPU buffer to host memory.
// cudaStatus = cudaMemcpy(host_out, dev_odd, gridVol * sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error;
// }
//
//
// PrintIntArray(host_out, span, gridVol);
//
//Error:
// cudaFree(dev_odd);
// cudaFree(dev_even);
//
// return cudaStatus;
//}
|
ce7265bd46a27c6250f2820e22c255349b324f9f.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* (c) 2019 Gustavo Valiente [email protected]
*
* MIT License, see LICENSE file.
*/
#include "pcps_thrust_cached_allocator.h"
#include <iostream>
#include "thrust/system/hip/memory.h"
namespace pcps_thrust
{
namespace
{
constexpr int Size = 8192;
}
CachedAllocator::value_type* CachedAllocator::allocate(std::ptrdiff_t numBytes)
{
if(std::size_t(numBytes) > Size)
{
std::cerr << "CachedAllocator::allocate(): invalid numBytes: " << numBytes << std::endl;
return nullptr;
}
std::size_t numFreeBlocks = _freeBlocks.size();
value_type* result = nullptr;
if(numFreeBlocks > 0)
{
result = _freeBlocks[numFreeBlocks - 1];
_freeBlocks.pop_back();
}
else
{
result = thrust::hip::malloc<value_type>(Size).get();
if(! result)
{
std::cerr << "CachedAllocator::allocate(): thrust::hip::malloc call failed" << std::endl;
return nullptr;
}
}
return result;
}
void CachedAllocator::deallocate(value_type* pointer, std::size_t) noexcept
{
_freeBlocks.push_back(pointer);
}
void CachedAllocator::clear() noexcept
{
for(value_type* pointer : _freeBlocks)
{
thrust::hip::free(thrust::hip::pointer<value_type>(pointer));
}
_freeBlocks.clear();
}
}
|
ce7265bd46a27c6250f2820e22c255349b324f9f.cu
|
/*
* (c) 2019 Gustavo Valiente [email protected]
*
* MIT License, see LICENSE file.
*/
#include "pcps_thrust_cached_allocator.h"
#include <iostream>
#include "thrust/system/cuda/memory.h"
namespace pcps_thrust
{
namespace
{
constexpr int Size = 8192;
}
CachedAllocator::value_type* CachedAllocator::allocate(std::ptrdiff_t numBytes)
{
if(std::size_t(numBytes) > Size)
{
std::cerr << "CachedAllocator::allocate(): invalid numBytes: " << numBytes << std::endl;
return nullptr;
}
std::size_t numFreeBlocks = _freeBlocks.size();
value_type* result = nullptr;
if(numFreeBlocks > 0)
{
result = _freeBlocks[numFreeBlocks - 1];
_freeBlocks.pop_back();
}
else
{
result = thrust::cuda::malloc<value_type>(Size).get();
if(! result)
{
std::cerr << "CachedAllocator::allocate(): thrust::cuda::malloc call failed" << std::endl;
return nullptr;
}
}
return result;
}
void CachedAllocator::deallocate(value_type* pointer, std::size_t) noexcept
{
_freeBlocks.push_back(pointer);
}
void CachedAllocator::clear() noexcept
{
for(value_type* pointer : _freeBlocks)
{
thrust::cuda::free(thrust::cuda::pointer<value_type>(pointer));
}
_freeBlocks.clear();
}
}
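/* Illustrative usage (a sketch, not part of this translation unit): the cached
 * allocator is typically handed to Thrust through an execution policy so that
 * temporary buffers come from the free-block cache, e.g.
 *
 *   pcps_thrust::CachedAllocator alloc;
 *   thrust::sort(thrust::cuda::par(alloc), keys.begin(), keys.end());
 *   alloc.clear();
 *
 * 'keys' is a hypothetical device range; thrust::cuda::par(alloc) follows the
 * Thrust custom-temporary-allocation pattern and assumes the allocator
 * interface declared in pcps_thrust_cached_allocator.h. */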
|
f91d84e724f5fe236650feb01974139a7304095d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/ctc_greedy_decoder.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void CtcGreedyDecodeGpuMultiThread(int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr,
const int64_t* input_lengths_ptr,
const bool merge_repeated,
const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
const int64_t bid = blockIdx.x;
const int64_t tid = threadIdx.x;
for (int64_t b = bid; b < batch_size; b += gridDim.x) {
if (tid == 0) {
if (input_lengths_ptr[b] > max_input_length) __trap();
}
}
for (int64_t b = bid; b < batch_size; b += gridDim.x) {
extern __shared__ int64_t shared_max_indices_memory[];
int64_t* shared_max_indices = (int64_t*)shared_max_indices_memory;
NdIndexOffsetHelper<int64_t, 3> input_helper(max_input_length, batch_size, num_labels);
for (int64_t t = tid; t < max_input_length; t += blockDim.x) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
int64_t max_indice = 0;
T max_value = -FLT_MAX;
FOR_RANGE(int64_t, c, 0, num_labels) {
const T prob = prob_data_t[c];
if (prob > max_value) {
max_indice = c;
max_value = prob;
}
}
shared_max_indices[t] = max_indice;
}
__syncthreads();
if (tid == 0) {
int64_t prev_indices = -1, t_dec = 0;
FOR_RANGE(int64_t, t, 0, input_lengths_ptr[b]) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
const int64_t indice_t = shared_max_indices[t];
neg_sum_logits_ptr[b] -= prob_data_t[indice_t];
if (indice_t != num_labels - 1 && !(merge_repeated && (prev_indices == indice_t))) {
decoded_ptr[b * max_input_length + t_dec] = indice_t;
t_dec++;
}
prev_indices = indice_t;
}
FOR_RANGE(int64_t, t, t_dec, max_input_length) { decoded_ptr[b * max_input_length + t] = 0; }
}
}
}
template<typename T>
__global__ void CtcGreedyDecodeGpu(int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr, const int64_t* input_lengths_ptr,
const bool merge_repeated, const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
for (int64_t b = 0; b < batch_size; b++) {
if (input_lengths_ptr[b] > max_input_length) __trap();
}
NdIndexOffsetHelper<int64_t, 3> input_helper(max_input_length, batch_size, num_labels);
CUDA_1D_KERNEL_LOOP(b, batch_size) {
int prev_indices = -1, t_dec = 0;
neg_sum_logits_ptr[b] = 0;
FOR_RANGE(int64_t, t, 0, input_lengths_ptr[b]) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
int64_t max_indice = -1;
T max_value = -FLT_MAX;
FOR_RANGE(int64_t, c, 0, num_labels) {
if (prob_data_t[c] > max_value) {
max_indice = c;
max_value = prob_data_t[c];
}
}
neg_sum_logits_ptr[b] -= max_value;
if (max_indice != num_labels - 1 && !(merge_repeated && (prev_indices == max_indice))) {
decoded_ptr[b * max_input_length + t_dec] = max_indice;
t_dec++;
}
prev_indices = max_indice;
}
FOR_RANGE(int64_t, t, t_dec, max_input_length) { decoded_ptr[b * max_input_length + t] = 0; }
}
}
template<typename T>
struct CTCGreedyDecoderFunctor<DeviceType::kCUDA, T> final {
void operator()(ep::Stream* stream, int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr, const int64_t* input_lengths_ptr,
const bool merge_repeated, const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
int32_t thread_num = batch_size * kCudaThreadsNumPerBlock;
int64_t shared_mem_size = max_input_length * sizeof(int64_t);
int max_active_blocks;
OF_CUDA_CHECK(hipOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, CtcGreedyDecodeGpu<T>, kCudaThreadsNumPerBlock, shared_mem_size));
if (max_active_blocks > 0) {
hipLaunchKernelGGL(( CtcGreedyDecodeGpuMultiThread), dim3(BlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock),
shared_mem_size,
stream->As<ep::CudaStream>()->cuda_stream(),
decoded_ptr, neg_sum_logits_ptr, log_probs_ptr, input_lengths_ptr, merge_repeated,
max_input_length, batch_size, num_labels);
} else {
hipLaunchKernelGGL(( CtcGreedyDecodeGpu), dim3(BlocksNum4ThreadsNum(thread_num)), dim3(kCudaThreadsNumPerBlock), 0,
stream->As<ep::CudaStream>()->cuda_stream(),
decoded_ptr, neg_sum_logits_ptr, log_probs_ptr, input_lengths_ptr, merge_repeated,
max_input_length, batch_size, num_labels);
}
}
};
} // namespace
REGISTER_CTC_GREEDY_DECODER_KERNELS(DeviceType::kCUDA, float);
REGISTER_CTC_GREEDY_DECODER_KERNELS(DeviceType::kCUDA, double);
} // namespace oneflow
|
f91d84e724f5fe236650feb01974139a7304095d.cu
|
/*
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include "oneflow/core/framework/framework.h"
#include "oneflow/core/kernel/new_kernel_util.h"
#include "oneflow/core/kernel/kernel_util.h"
#include "oneflow/user/kernels/ctc_greedy_decoder.h"
#include "oneflow/core/ep/cuda/cuda_stream.h"
namespace oneflow {
namespace {
template<typename T>
__global__ void CtcGreedyDecodeGpuMultiThread(int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr,
const int64_t* input_lengths_ptr,
const bool merge_repeated,
const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
const int64_t bid = blockIdx.x;
const int64_t tid = threadIdx.x;
for (int64_t b = bid; b < batch_size; b += gridDim.x) {
if (tid == 0) {
if (input_lengths_ptr[b] > max_input_length) __trap();
}
}
for (int64_t b = bid; b < batch_size; b += gridDim.x) {
extern __shared__ int64_t shared_max_indices_memory[];
int64_t* shared_max_indices = (int64_t*)shared_max_indices_memory;
NdIndexOffsetHelper<int64_t, 3> input_helper(max_input_length, batch_size, num_labels);
for (int64_t t = tid; t < max_input_length; t += blockDim.x) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
int64_t max_indice = 0;
T max_value = -FLT_MAX;
FOR_RANGE(int64_t, c, 0, num_labels) {
const T prob = prob_data_t[c];
if (prob > max_value) {
max_indice = c;
max_value = prob;
}
}
shared_max_indices[t] = max_indice;
}
__syncthreads();
if (tid == 0) {
int64_t prev_indices = -1, t_dec = 0;
FOR_RANGE(int64_t, t, 0, input_lengths_ptr[b]) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
const int64_t indice_t = shared_max_indices[t];
neg_sum_logits_ptr[b] -= prob_data_t[indice_t];
if (indice_t != num_labels - 1 && !(merge_repeated && (prev_indices == indice_t))) {
decoded_ptr[b * max_input_length + t_dec] = indice_t;
t_dec++;
}
prev_indices = indice_t;
}
FOR_RANGE(int64_t, t, t_dec, max_input_length) { decoded_ptr[b * max_input_length + t] = 0; }
}
}
}
template<typename T>
__global__ void CtcGreedyDecodeGpu(int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr, const int64_t* input_lengths_ptr,
const bool merge_repeated, const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
for (int64_t b = 0; b < batch_size; b++) {
if (input_lengths_ptr[b] > max_input_length) __trap();
}
NdIndexOffsetHelper<int64_t, 3> input_helper(max_input_length, batch_size, num_labels);
CUDA_1D_KERNEL_LOOP(b, batch_size) {
int prev_indices = -1, t_dec = 0;
neg_sum_logits_ptr[b] = 0;
FOR_RANGE(int64_t, t, 0, input_lengths_ptr[b]) {
const T* prob_data_t = &log_probs_ptr[input_helper.NdIndexToOffset(t, b, 0)];
int64_t max_indice = -1;
T max_value = -FLT_MAX;
FOR_RANGE(int64_t, c, 0, num_labels) {
if (prob_data_t[c] > max_value) {
max_indice = c;
max_value = prob_data_t[c];
}
}
neg_sum_logits_ptr[b] -= max_value;
if (max_indice != num_labels - 1 && !(merge_repeated && (prev_indices == max_indice))) {
decoded_ptr[b * max_input_length + t_dec] = max_indice;
t_dec++;
}
prev_indices = max_indice;
}
FOR_RANGE(int64_t, t, t_dec, max_input_length) { decoded_ptr[b * max_input_length + t] = 0; }
}
}
template<typename T>
struct CTCGreedyDecoderFunctor<DeviceType::kCUDA, T> final {
void operator()(ep::Stream* stream, int64_t* decoded_ptr, T* neg_sum_logits_ptr,
const T* log_probs_ptr, const int64_t* input_lengths_ptr,
const bool merge_repeated, const int64_t max_input_length,
const int64_t batch_size, const int64_t num_labels) {
int32_t thread_num = batch_size * kCudaThreadsNumPerBlock;
int64_t shared_mem_size = max_input_length * sizeof(int64_t);
int max_active_blocks;
OF_CUDA_CHECK(cudaOccupancyMaxActiveBlocksPerMultiprocessor(
&max_active_blocks, CtcGreedyDecodeGpu<T>, kCudaThreadsNumPerBlock, shared_mem_size));
if (max_active_blocks > 0) {
CtcGreedyDecodeGpuMultiThread<<<BlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock,
shared_mem_size,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
decoded_ptr, neg_sum_logits_ptr, log_probs_ptr, input_lengths_ptr, merge_repeated,
max_input_length, batch_size, num_labels);
} else {
CtcGreedyDecodeGpu<<<BlocksNum4ThreadsNum(thread_num), kCudaThreadsNumPerBlock, 0,
stream->As<ep::CudaStream>()->cuda_stream()>>>(
decoded_ptr, neg_sum_logits_ptr, log_probs_ptr, input_lengths_ptr, merge_repeated,
max_input_length, batch_size, num_labels);
}
}
};
} // namespace
REGISTER_CTC_GREEDY_DECODER_KERNELS(DeviceType::kCUDA, float);
REGISTER_CTC_GREEDY_DECODER_KERNELS(DeviceType::kCUDA, double);
} // namespace oneflow
|
fa6ae1cb8948ef2a6d593fe4c93647dea6fe2b04.hip
|
// !!! This is a file automatically generated by hipify!!!
//fail: assertion
//--blockDim=512 --gridDim=1 --no-inline
#include <hip/hip_runtime.h>
#include <assert.h>
#include <stdio.h>
#define N 2//512
__global__ void helloCUDA(volatile int* p)
{
//__assert(__no_read(p));
p[threadIdx.x] = threadIdx.x;
}
|
fa6ae1cb8948ef2a6d593fe4c93647dea6fe2b04.cu
|
//fail: assertion
//--blockDim=512 --gridDim=1 --no-inline
#include <cuda.h>
#include <assert.h>
#include <stdio.h>
#define N 2//512
__global__ void helloCUDA(volatile int* p)
{
//__assert(__no_read(p));
p[threadIdx.x] = threadIdx.x;
}
|
6d65a5055ab4ccb59bff9d465095376393f1814f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cu_integral_image.h"
#include "launch_utils.h"
#include "CUDA_SDK/sharedmem.h"
namespace roo
{
//////////////////////////////////////////////////////
// Image Transpose
// Efficient Integral Image Computation on the GPU
// Berkin Bilgic, Berthold K.P. Horn, Ichiro Masaki
//////////////////////////////////////////////////////
template<typename Tout, typename Tin, int BLOCK_DIM>
__global__ void KernTranspose(Image<Tout> out, Image<Tin> in)
{
__shared__ Tin temp[BLOCK_DIM][BLOCK_DIM+1];
int xIndex = blockIdx.x*BLOCK_DIM + threadIdx.x;
int yIndex = blockIdx.y*BLOCK_DIM + threadIdx.y;
if((xIndex < in.w) && (yIndex < in.h)) {
temp[threadIdx.y][threadIdx.x] = in(xIndex,yIndex);
}
__syncthreads();
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < in.h) && (yIndex < in.w)) {
out(xIndex,yIndex) = temp[threadIdx.x][threadIdx.y];
}
}
template<typename Tout, typename Tin>
void Transpose(Image<Tout> out, Image<Tin> in)
{
dim3 gridDim, blockDim;
InitDimFromOutputImageOver(blockDim,gridDim,in, 16,16);
hipLaunchKernelGGL(( KernTranspose<Tout,Tin,16>), dim3(gridDim),dim3(blockDim), 0, 0, out,in);
}
// Instantiate useful versions
template KANGAROO_EXPORT void Transpose(Image<unsigned char>,Image<unsigned char>);
template KANGAROO_EXPORT void Transpose(Image<int>,Image<int>);
template KANGAROO_EXPORT void Transpose(Image<float>,Image<float>);
//////////////////////////////////////////////////////
// PrefixSum
// Efficient Integral Image Computation on the GPU
// Berkin Bilgic, Berthold K.P. Horn, Ichiro Masaki
//////////////////////////////////////////////////////
template<typename Tout, typename Tin>
inline __device__
void PrefixSum(Tout* output, Tin* input, int w, int nextpow2)
{
SharedMemory<Tout> shared;
Tout* temp = shared.getPointer();
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
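    // work-efficient (Blelloch-style) scan: the loop below is the up-sweep
    // (reduction) phase; after the last element is cleared, the second loop
    // performs the down-sweep, leaving an exclusive prefix sum of the row in temp.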
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if(tdx == 0) temp[nextpow2 - 1] = 0;
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
Tout t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
}
template<typename Tout, typename Tin>
__global__ void KernPrefixSumRows(Image<Tout> out, Image<Tin> in)
{
const int row = blockIdx.y;
PrefixSum<Tout,Tin>(out.RowPtr(row), in.RowPtr(row), in.w, 2*blockDim.x );
}
template<typename Tout, typename Tin>
void PrefixSumRows(Image<Tout> out, Image<Tin> in)
{
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(in.w/2.0f)) blockDim.x <<= 1;
const dim3 gridDim = dim3( 1, in.h );
hipLaunchKernelGGL(( KernPrefixSumRows), dim3(gridDim),dim3(blockDim),2*sizeof(Tout)*blockDim.x, 0, out,in);
}
// Instantiate useful versions
template KANGAROO_EXPORT void PrefixSumRows(Image<int>, Image<unsigned char>);
template KANGAROO_EXPORT void PrefixSumRows(Image<int>, Image<int>);
template KANGAROO_EXPORT void PrefixSumRows(Image<float>, Image<float>);
//////////////////////////////////////////////////////
// Large Radius Box Filter using Integral Image
//////////////////////////////////////////////////////
template<typename Tout, typename Tin>
__global__ void KernBoxFilterIntegralImage(Image<Tout> out, Image<Tin> IntegralImageT, int rad)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if(out.InBounds(x,y)) {
const int minx = max(0,x-rad);
const int maxx = min((int)out.w-1,x+rad);
const int miny = max(0,y-rad);
const int maxy = min((int)out.h-1,y+rad);
const int winw = maxx - minx;
const int winh = maxy - miny;
const int area = winw * winh;
const Tin A = IntegralImageT(miny,minx);
const Tin B = IntegralImageT(miny,maxx);
const Tin C = IntegralImageT(maxy,maxx);
const Tin D = IntegralImageT(maxy,minx);
const Tin sum = C+A-B-D;
const Tout mean = (float)sum / area;
out(x,y) = mean;
}
}
template<typename Tout, typename Tin>
void BoxFilterIntegralImage(Image<Tout> out, Image<Tin> IntegralImageT, int rad)
{
dim3 gridDim, blockDim;
InitDimFromOutputImageOver(blockDim,gridDim, out);
hipLaunchKernelGGL(( KernBoxFilterIntegralImage<Tout,Tin>), dim3(gridDim),dim3(blockDim), 0, 0, out,IntegralImageT,rad);
}
// Instantiate useful versions
template KANGAROO_EXPORT void BoxFilterIntegralImage(Image<float>, Image<int>, int);
template KANGAROO_EXPORT void BoxFilterIntegralImage(Image<float>, Image<float>, int);
}
|
6d65a5055ab4ccb59bff9d465095376393f1814f.cu
|
#include "cu_integral_image.h"
#include "launch_utils.h"
#include "CUDA_SDK/sharedmem.h"
namespace roo
{
//////////////////////////////////////////////////////
// Image Transpose
// Efficient Integral Image Computation on the GPU
// Berkin Bilgic, Berthold K.P. Horn, Ichiro Masaki
//////////////////////////////////////////////////////
template<typename Tout, typename Tin, int BLOCK_DIM>
__global__ void KernTranspose(Image<Tout> out, Image<Tin> in)
{
__shared__ Tin temp[BLOCK_DIM][BLOCK_DIM+1];
int xIndex = blockIdx.x*BLOCK_DIM + threadIdx.x;
int yIndex = blockIdx.y*BLOCK_DIM + threadIdx.y;
if((xIndex < in.w) && (yIndex < in.h)) {
temp[threadIdx.y][threadIdx.x] = in(xIndex,yIndex);
}
__syncthreads();
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < in.h) && (yIndex < in.w)) {
out(xIndex,yIndex) = temp[threadIdx.x][threadIdx.y];
}
}
template<typename Tout, typename Tin>
void Transpose(Image<Tout> out, Image<Tin> in)
{
dim3 gridDim, blockDim;
InitDimFromOutputImageOver(blockDim,gridDim,in, 16,16);
KernTranspose<Tout,Tin,16><<<gridDim,blockDim>>>(out,in);
}
// Instantiate useful versions
template KANGAROO_EXPORT void Transpose(Image<unsigned char>,Image<unsigned char>);
template KANGAROO_EXPORT void Transpose(Image<int>,Image<int>);
template KANGAROO_EXPORT void Transpose(Image<float>,Image<float>);
//////////////////////////////////////////////////////
// PrefixSum
// Efficient Integral Image Computation on the GPU
// Berkin Bilgic, Berthold K.P. Horn, Ichiro Masaki
//////////////////////////////////////////////////////
template<typename Tout, typename Tin>
inline __device__
void PrefixSum(Tout* output, Tin* input, int w, int nextpow2)
{
SharedMemory<Tout> shared;
Tout* temp = shared.getPointer();
const int tdx = threadIdx.x;
int offset = 1;
const int tdx2 = 2*tdx;
const int tdx2p = tdx2 + 1;
temp[tdx2] = tdx2 < w ? input[tdx2] : 0;
temp[tdx2p] = tdx2p < w ? input[tdx2p] : 0;
for(int d = nextpow2>>1; d > 0; d >>= 1) {
__syncthreads();
if(tdx < d)
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
if(tdx == 0) temp[nextpow2 - 1] = 0;
for(int d = 1; d < nextpow2; d *= 2) {
offset >>= 1;
__syncthreads();
if(tdx < d )
{
int ai = offset*(tdx2p)-1;
int bi = offset*(tdx2+2)-1;
Tout t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
if(tdx2 < w) output[tdx2] = temp[tdx2];
if(tdx2p < w) output[tdx2p] = temp[tdx2p];
}
template<typename Tout, typename Tin>
__global__ void KernPrefixSumRows(Image<Tout> out, Image<Tin> in)
{
const int row = blockIdx.y;
PrefixSum<Tout,Tin>(out.RowPtr(row), in.RowPtr(row), in.w, 2*blockDim.x );
}
template<typename Tout, typename Tin>
void PrefixSumRows(Image<Tout> out, Image<Tin> in)
{
dim3 blockDim = dim3( 1, 1);
while(blockDim.x < ceil(in.w/2.0f)) blockDim.x <<= 1;
const dim3 gridDim = dim3( 1, in.h );
KernPrefixSumRows<<<gridDim,blockDim,2*sizeof(Tout)*blockDim.x>>>(out,in);
}
// Instantiate useful versions
template KANGAROO_EXPORT void PrefixSumRows(Image<int>, Image<unsigned char>);
template KANGAROO_EXPORT void PrefixSumRows(Image<int>, Image<int>);
template KANGAROO_EXPORT void PrefixSumRows(Image<float>, Image<float>);
//////////////////////////////////////////////////////
// Large Radius Box Filter using Integral Image
//////////////////////////////////////////////////////
template<typename Tout, typename Tin>
__global__ void KernBoxFilterIntegralImage(Image<Tout> out, Image<Tin> IntegralImageT, int rad)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if(out.InBounds(x,y)) {
const int minx = max(0,x-rad);
const int maxx = min((int)out.w-1,x+rad);
const int miny = max(0,y-rad);
const int maxy = min((int)out.h-1,y+rad);
const int winw = maxx - minx;
const int winh = maxy - miny;
const int area = winw * winh;
const Tin A = IntegralImageT(miny,minx);
const Tin B = IntegralImageT(miny,maxx);
const Tin C = IntegralImageT(maxy,maxx);
const Tin D = IntegralImageT(maxy,minx);
const Tin sum = C+A-B-D;
const Tout mean = (float)sum / area;
out(x,y) = mean;
}
}
template<typename Tout, typename Tin>
void BoxFilterIntegralImage(Image<Tout> out, Image<Tin> IntegralImageT, int rad)
{
dim3 gridDim, blockDim;
InitDimFromOutputImageOver(blockDim,gridDim, out);
KernBoxFilterIntegralImage<Tout,Tin><<<gridDim,blockDim>>>(out,IntegralImageT,rad);
}
// Instantiate useful versions
template KANGAROO_EXPORT void BoxFilterIntegralImage(Image<float>, Image<int>, int);
template KANGAROO_EXPORT void BoxFilterIntegralImage(Image<float>, Image<float>, int);
}
|
matrixInverseBLAS.hip
|
// !!! This is a file automatically generated by hipify!!!
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Inverse Matrix by CUBLAS library
8.3. LU factorization cublas<t>getrfBatched(): http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-getrfbatched in cuda5.0
8.4. Matrix inversion cublas<t>getriBatched(): http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-getribatched in cuda5.5
10.2. Sparse-matrix LU factorization cusparse<t>csrilu0 : http://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrilu0
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <hip/hip_runtime.h>
// includes CUDA BLAS
#include <rocblas.h>
#include <cusparse_v2.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include "matrixInverseBLAS.cuh"
#define BENCH_MATRIX_EXP 7 //2~10
#define BENCH_MATRIX_ROWS (1<<BENCH_MATRIX_EXP)
#define CUBLAS_TEST_COUNT (1) // 10~1000
#define SWITCH_CHAR '-'
__inline__ __device__ __host__ float cuGet(double x)
{
return float(x);
}
extern "C"
void computeGold(float *reference, float *idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
testKernel(float *g_idata, float *g_odata)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (float) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
int verifyResult( T_ELEM *A, T_ELEM *B , int n )
{
T_ELEM *C = (T_ELEM *)malloc( n * n * sizeof(T_ELEM));
/* Host implementation of a simple version of sgemm */
int i;
int j;
int k;
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
T_ELEM prod = 0;
for (k = 0; k < n; ++k)
{
prod += A[k * n + i] * B[j * n + k];
}
C[j * n + i] = prod;
}
}
for (i = 0; i < n; ++i)
{
if( fabs( C[i * n + i] - 1.0f ) > 1.0e-3 )
{
free(C);
return i;
}
}
free(C);
return 0;
}
int verifyResultBLAS( T_ELEM *A, T_ELEM *B , int n )
{
hipblasStatus_t status;
// blas config
hipblasHandle_t handle;
hipblasCreate(&handle);
float alpha = 1.0f;
float beta = 0.0f;
T_ELEM *d_C;
hipMalloc((void **)&d_C, n * n * sizeof(T_ELEM));
T_ELEM *C = (T_ELEM *)malloc( n * n * sizeof(T_ELEM));
status = hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &alpha, A, n, B, n, &beta, d_C, n);
hipMemcpy( C, d_C, n * n * sizeof(T_ELEM) , hipMemcpyDeviceToHost );
hipFree( d_C );
hipblasDestroy(handle);
for (int i = 0; i < n; ++i)
{
if( fabs( C[i * n + i] - 1.0f ) > 1.0e-3 )
{
free(C);
return i;
}
}
free(C);
return 0;
}
// sparse-matrix LU decomposition helper (converts the dense matrix to CSR via cuSPARSE)
int luDecomposeSparse( T_ELEM **devPtrA , int n )
{
/* Get handle to the CUSPARSE context */
hipsparseHandle_t cusparseHandle = 0;
hipsparseStatus_t cusparseStatus;
cusparseStatus = hipsparseCreate(&cusparseHandle);
hipsparseMatDescr_t descr = 0;
cusparseStatus = hipsparseCreateMatDescr(&descr);
hipsparseSetMatType(descr,HIPSPARSE_MATRIX_TYPE_GENERAL);
hipsparseSetMatIndexBase(descr,HIPSPARSE_INDEX_BASE_ZERO);
    // get the number of nonzero elements
int nz = 0;
int *nnzPerRow = NULL;
hipMalloc( (void**)&nnzPerRow, n*sizeof(int) );
hipsparseSnnz( cusparseHandle, HIPSPARSE_DIRECTION_ROW, n,
n, descr,
devPtrA[0],
n, nnzPerRow, &nz );
// sparse matrix
int *d_col, *d_row;
float *d_val;
checkCudaErrors(hipMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_row, (n+1)*sizeof(int)));
checkCudaErrors(hipMalloc((void **)&d_val, nz*sizeof(float)));
cusparseStatus = hipsparseSdense2csr( cusparseHandle, n,
n, descr,
devPtrA[0], n,
nnzPerRow,
d_val, d_row, d_col) ;
if (cusparseStatus != HIPBLAS_STATUS_SUCCESS)
{
hipError_t cuError = hipGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", cusparseStatus, cuError,hipGetErrorString(cuError));
return -1;
}
return 0;
}
// blas C[i] = A[i] ^ -1
// inverse batch of matrices
int inverseMatrixBLAS( T_ELEM **A , T_ELEM **C , int matrixRows , int sizeBatch ,int bDebug )
{
int matrixSize = matrixRows * matrixRows;
hipError_t err1;
hipblasStatus_t status;
// temp data
T_ELEM **devPtrA = 0;
T_ELEM **devPtrA_dev = NULL;
T_ELEM **devPtrC = 0;
T_ELEM **devPtrC_dev = NULL;
// temp data for matrix A, input matrix
    devPtrA = (T_ELEM **)malloc( sizeBatch * sizeof(T_ELEM *));
for (int i = 0; i < sizeBatch ; i++)
{
hipMalloc((void **)&devPtrA[i], matrixSize * sizeof(T_ELEM));
hipblasSetMatrix( matrixRows, matrixRows, sizeof(T_ELEM), A[i], matrixRows, devPtrA[i], matrixRows);
}
hipMalloc((void **)&devPtrA_dev, sizeBatch * sizeof(T_ELEM));
hipMemcpy(devPtrA_dev, devPtrA, sizeBatch * sizeof(*devPtrA), hipMemcpyHostToDevice);
// temp data for matrix C, output inverse matrix of A
    devPtrC = (T_ELEM **)malloc( sizeBatch * sizeof(T_ELEM *));
for (int i = 0; i < sizeBatch ; i++)
{
hipMalloc((void **)&devPtrC[i], matrixSize * sizeof(T_ELEM));
}
hipMalloc((void **)&devPtrC_dev, sizeBatch * sizeof(T_ELEM));
hipMemcpy(devPtrC_dev, devPtrC, sizeBatch * sizeof(*devPtrC), hipMemcpyHostToDevice);
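    // note: the batched cuBLAS routines expect an array of device pointers that
    // itself resides in device memory; devPtrA/devPtrC are the host-side pointer
    // arrays and devPtrA_dev/devPtrC_dev are their device-resident mirrors.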
    // intermediate work buffers (pivot indices and per-matrix status)
int *d_pivotArray = NULL;
int *d_infoArray = NULL;
hipMalloc( (void**)&d_pivotArray, matrixRows*sizeBatch*sizeof(int) );
hipMalloc( (void**)&d_infoArray, sizeBatch*sizeof(int) );
int *h_infoArray = NULL;
// blas config
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSetStream(handle, 0 );
// timer begin
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
#if 0
luDecomposeSparse( devPtrA, matrixRows );
//d_pivotArray[];
//d_infoArray[];
#else
    // batched LU factorization, performed in place on devPtrA_dev
status = hipblasSgetrfBatched(handle,
matrixRows,
devPtrA_dev,
matrixRows,
d_pivotArray,
d_infoArray,
sizeBatch);
if (status != HIPBLAS_STATUS_SUCCESS)
{
hipError_t cuError = hipGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status, cuError,hipGetErrorString(cuError));
return -1;
}
#endif
    // check the per-matrix LU factorization status
if( bDebug )
{
h_infoArray = (int*)malloc( sizeBatch*sizeof(int) );
hipMemcpy( h_infoArray, d_infoArray, sizeBatch*sizeof(int), hipMemcpyDeviceToHost );
for(int i=0;i<sizeBatch;i++)
{
if( h_infoArray[i] == 0 )
{
//fprintf(stderr, "%d-th matrix lu-decompose successed, !\n", i );
continue;
}
else if (h_infoArray[i] > 0)
{
fprintf(stderr, "%d-th matrix lu-decompose failed, U(%d,%d) = 0!\n", i, h_infoArray[i], h_infoArray[i] );
continue;
}
else
{
fprintf(stderr, "%d-th matrix lu-decompose failed, the %d-th parameter had an illegal value!\n", i, -h_infoArray[i] );
continue;
}
}
}
#if 1 // if 0, only the LU factorization runs (timing note: roughly 700-1800 ms for a 1536*1536 matrix on a GTX 480 class GPU)
// inversion of matrices A, output result to matrices C
status = hipblasSgetriBatched(handle,
matrixRows,
devPtrA_dev,
matrixRows,
d_pivotArray,
devPtrC_dev,
matrixRows,
d_infoArray,
sizeBatch);
if (status != HIPBLAS_STATUS_SUCCESS)
{
hipError_t cuError = hipGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status, cuError,hipGetErrorString(cuError));
return -1 ;
}
#endif
    // check the per-matrix inversion status
if( bDebug )
{
hipMemcpy( h_infoArray, d_infoArray, sizeBatch*sizeof(int), hipMemcpyDeviceToHost );
for(int i=0;i<sizeBatch;i++)
{
if( h_infoArray[i] == 0 )
{
//fprintf(stderr, "%d-th matrix lu-decompose successed, !\n", i );
continue;
}
else if (h_infoArray[i] > 0)
{
fprintf(stderr, "%d-th matrix lu-decompose failed, U(%d,%d) = 0!\n", i, h_infoArray[i], h_infoArray[i] );
continue;
}
}
}
// timer end
hipError_t cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess)
{
fprintf(stderr, "!!!! GPU program execution error on hipDeviceSynchronize : hipError_t=%d,(%s)\n", cudaStatus,hipGetErrorString(cudaStatus));
return -1;
}
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
// gpu -> cpu
for(int i=0; i< sizeBatch; i++)
{
hipMemcpy( C[i], devPtrC[i], matrixSize * sizeof(T_ELEM) , hipMemcpyDeviceToHost );
}
    // verify the result: A * A^(-1) should equal the identity
if( bDebug )
{
int bStatus = 0;
for(int i=0; i< sizeBatch; i++)
{
#if 0
bStatus = verifyResultBLAS( devPtrA[i], devPtrC[i], matrixRows );
#else
bStatus = verifyResult( A[i], C[i], matrixRows );
#endif
if( bStatus )
{
printf( "Matrix Inverse Wrong! A*A^(-1) [%d,%d] !=1 \n" ,bStatus ,bStatus );
break;
}
}
}
    // free device and host resources
for(int i = 0; i < sizeBatch; ++i)
{
if(devPtrA[i]) hipFree(devPtrA[i]);
if(devPtrC[i]) hipFree(devPtrC[i]);
}
if (devPtrA) free(devPtrA);
if (devPtrC) free(devPtrC);
if (devPtrA_dev) hipFree(devPtrA_dev);
if (devPtrC_dev) hipFree(devPtrC_dev);
if (d_pivotArray) hipFree(d_pivotArray);
if (d_infoArray) hipFree(d_infoArray);
if (h_infoArray) free(h_infoArray);
hipblasDestroy(handle);
return 0;
}
// blas C = A ^ -1
// inverse a matrix
int inverseMatrixBLAS( T_ELEM *A , T_ELEM *C, int matrixRows, int bDebug )
{
// matrix A, input matrix
T_ELEM **ABatch = (T_ELEM **)malloc( 1 * sizeof(T_ELEM*));
*ABatch = A;
// matrix C, output inverse matrix of A
T_ELEM **CBatch = (T_ELEM **)malloc( 1 * sizeof(T_ELEM*));
*CBatch = C;
inverseMatrixBLAS( ABatch, CBatch, matrixRows, 1, bDebug ) ;
if (ABatch) free (ABatch);
if (CBatch) free (CBatch);
return 0;
}
|
matrixInverseBLAS.cu
|
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2013 NVIDIA Corporation. All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////
/* Inverse Matrix by CUBLAS library
8.3. LU factorization cublas<t>getrfBatched(): http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-getrfbatched in cuda5.0
8.4. Matrix inversion cublas<t>getriBatched(): http://docs.nvidia.com/cuda/cublas/index.html#cublas-lt-t-gt-getribatched in cuda5.5
10.2. Sparse-matrix LU factorization cusparse<t>csrilu0 : http://docs.nvidia.com/cuda/cusparse/index.html#cusparse-lt-t-gt-csrilu0
*/
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes CUDA
#include <cuda_runtime.h>
// includes CUDA BLAS
#include <cublas_v2.h>
#include <cusparse_v2.h>
// includes, project
#include <helper_cuda.h>
#include <helper_functions.h> // helper functions for SDK examples
#include "matrixInverseBLAS.cuh"
#define BENCH_MATRIX_EXP 7 //2~10
#define BENCH_MATRIX_ROWS (1<<BENCH_MATRIX_EXP)
#define CUBLAS_TEST_COUNT (1) // 10~1000
#define SWITCH_CHAR '-'
__inline__ __device__ __host__ float cuGet(double x)
{
return float(x);
}
extern "C"
void computeGold(float *reference, float *idata, const unsigned int len);
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
__global__ void
testKernel(float *g_idata, float *g_odata)
{
// shared memory
// the size is determined by the host application
extern __shared__ float sdata[];
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (float) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
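// Checks that B is the inverse of A: computes C = A*B with a naive column-major matrix
// multiply on the host and verifies every diagonal element of C is within 1e-3 of 1.
// Returns 0 on success, otherwise the index of the first offending diagonal element.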
int verifyResult( T_ELEM *A, T_ELEM *B , int n )
{
T_ELEM *C = (T_ELEM *)malloc( n * n * sizeof(T_ELEM));
/* Host implementation of a simple version of sgemm */
int i;
int j;
int k;
for (i = 0; i < n; ++i)
{
for (j = 0; j < n; ++j)
{
T_ELEM prod = 0;
for (k = 0; k < n; ++k)
{
prod += A[k * n + i] * B[j * n + k];
}
C[j * n + i] = prod;
}
}
for (i = 0; i < n; ++i)
{
if( fabs( C[i * n + i] - 1.0f ) > 1.0e-3 )
{
free(C);
return i;
}
}
free(C);
return 0;
}
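// Same check as verifyResult, but forms C = A*B on the GPU with cublasSgemm;
// A and B must already be device pointers.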
int verifyResultBLAS( T_ELEM *A, T_ELEM *B , int n )
{
cublasStatus_t status;
// blas config
cublasHandle_t handle;
cublasCreate(&handle);
float alpha = 1.0f;
float beta = 0.0f;
T_ELEM *d_C;
cudaMalloc((void **)&d_C, n * n * sizeof(T_ELEM));
T_ELEM *C = (T_ELEM *)malloc( n * n * sizeof(T_ELEM));
status = cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &alpha, A, n, B, n, &beta, d_C, n);
cudaMemcpy( C, d_C, n * n * sizeof(T_ELEM) , cudaMemcpyDeviceToHost );
cudaFree( d_C );
cublasDestroy(handle);
for (int i = 0; i < n; ++i)
{
if( fabs( C[i * n + i] - 1.0f ) > 1.0e-3 )
{
free(C);
return i;
}
}
free(C);
return 0;
}
// LU decomposition of a sparse matrix
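// Note: as written this helper only counts the non-zeros (cusparseSnnz) and converts the
// dense matrix to CSR (cusparseSdense2csr); the csrilu0 factorization itself is never called.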
int luDecomposeSparse( T_ELEM **devPtrA , int n )
{
/* Get handle to the CUSPARSE context */
cusparseHandle_t cusparseHandle = 0;
cusparseStatus_t cusparseStatus;
cusparseStatus = cusparseCreate(&cusparseHandle);
cusparseMatDescr_t descr = 0;
cusparseStatus = cusparseCreateMatDescr(&descr);
cusparseSetMatType(descr,CUSPARSE_MATRIX_TYPE_GENERAL);
cusparseSetMatIndexBase(descr,CUSPARSE_INDEX_BASE_ZERO);
// get element count of nonzero
int nz = 0;
int *nnzPerRow = NULL;
cudaMalloc( (void**)&nnzPerRow, n*sizeof(int) );
cusparseSnnz( cusparseHandle, CUSPARSE_DIRECTION_ROW, n,
n, descr,
devPtrA[0],
n, nnzPerRow, &nz );
// sparse matrix
int *d_col, *d_row;
float *d_val;
checkCudaErrors(cudaMalloc((void **)&d_col, nz*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_row, (n+1)*sizeof(int)));
checkCudaErrors(cudaMalloc((void **)&d_val, nz*sizeof(float)));
cusparseStatus = cusparseSdense2csr( cusparseHandle, n,
n, descr,
devPtrA[0], n,
nnzPerRow,
d_val, d_row, d_col) ;
if (cusparseStatus != CUSPARSE_STATUS_SUCCESS)
{
cudaError_t cuError = cudaGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", cusparseStatus, cuError,cudaGetErrorString(cuError));
return -1;
}
return 0;
}
// Batch matrix inversion via the cuBLAS library, C[i] = A[i] ^ -1
// inverse batch of matrices
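// Overall flow: copy each A[i] to the device, LU-factorize the whole batch with
// cublasSgetrfBatched, invert via cublasSgetriBatched into C[i] on the device, then copy
// the inverses back to the host and (optionally, when bDebug is set) verify A[i]*C[i] ~= I.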
int inverseMatrixBLAS( T_ELEM **A , T_ELEM **C , int matrixRows , int sizeBatch ,int bDebug )
{
int matrixSize = matrixRows * matrixRows;
cudaError_t err1;
cublasStatus_t status;
// temp data
T_ELEM **devPtrA = 0;
T_ELEM **devPtrA_dev = NULL;
T_ELEM **devPtrC = 0;
T_ELEM **devPtrC_dev = NULL;
// temp data for matrix A, input matrix
devPtrA =(T_ELEM **)malloc( sizeBatch * sizeof(T_ELEM*));
for (int i = 0; i < sizeBatch ; i++)
{
cudaMalloc((void **)&devPtrA[i], matrixSize * sizeof(T_ELEM));
cublasSetMatrix( matrixRows, matrixRows, sizeof(T_ELEM), A[i], matrixRows, devPtrA[i], matrixRows);
}
cudaMalloc((void **)&devPtrA_dev, sizeBatch * sizeof(T_ELEM*));
cudaMemcpy(devPtrA_dev, devPtrA, sizeBatch * sizeof(*devPtrA), cudaMemcpyHostToDevice);
// temp data for matrix C, output inverse matrix of A
devPtrC =(T_ELEM **)malloc( sizeBatch * sizeof(T_ELEM*));
for (int i = 0; i < sizeBatch ; i++)
{
cudaMalloc((void **)&devPtrC[i], matrixSize * sizeof(T_ELEM));
}
cudaMalloc((void **)&devPtrC_dev, sizeBatch * sizeof(T_ELEM*));
cudaMemcpy(devPtrC_dev, devPtrC, sizeBatch * sizeof(*devPtrC), cudaMemcpyHostToDevice);
// temp data middle
int *d_pivotArray = NULL;
int *d_infoArray = NULL;
cudaMalloc( (void**)&d_pivotArray, matrixRows*sizeBatch*sizeof(int) );
cudaMalloc( (void**)&d_infoArray, sizeBatch*sizeof(int) );
int *h_infoArray = NULL;
// blas config
cublasHandle_t handle;
cublasCreate(&handle);
cublasSetStream(handle, 0 );
// timer begin
StopWatchInterface *timer = 0;
sdkCreateTimer(&timer);
sdkStartTimer(&timer);
#if 0
luDecomposeSparse( devPtrA, matrixRows );
//d_pivotArray[];
//d_infoArray[];
#else
// LU factorization (triangular decomposition of each matrix in the batch)
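// cublasSgetrfBatched overwrites each device matrix A[i] in place with its LU factors and
// stores the pivot indices in d_pivotArray; cublasSgetriBatched below then uses those
// factors to form the inverses C[i].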
status = cublasSgetrfBatched(handle,
matrixRows,
devPtrA_dev,
matrixRows,
d_pivotArray,
d_infoArray,
sizeBatch);
if (status != CUBLAS_STATUS_SUCCESS)
{
cudaError_t cuError = cudaGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status, cuError,cudaGetErrorString(cuError));
return -1;
}
#endif
// Check whether the LU decomposition succeeded
if( bDebug )
{
h_infoArray = (int*)malloc( sizeBatch*sizeof(int) );
cudaMemcpy( h_infoArray, d_infoArray, sizeBatch*sizeof(int), cudaMemcpyDeviceToHost );
for(int i=0;i<sizeBatch;i++)
{
if( h_infoArray[i] == 0 )
{
//fprintf(stderr, "%d-th matrix lu-decompose succeeded!\n", i );
continue;
}
else if (h_infoArray[i] > 0)
{
fprintf(stderr, "%d-th matrix lu-decompose failed, U(%d,%d) = 0!\n", i, h_infoArray[i], h_infoArray[i] );
continue;
}
else
{
fprintf(stderr, "%d-th matrix lu-decompose failed, the %d-th parameter had an illegal value!\n", i, -h_infoArray[i] );
continue;
}
}
}
#if 1// if 0: time only the LU factorization step above; a 1536*1536 matrix takes roughly 700-1800 ms (GPU480)
// inversion of matrices A, output result to matrices C (triangular-matrix inversion)
status = cublasSgetriBatched(handle,
matrixRows,
devPtrA_dev,
matrixRows,
d_pivotArray,
devPtrC_dev,
matrixRows,
d_infoArray,
sizeBatch);
if (status != CUBLAS_STATUS_SUCCESS)
{
cudaError_t cuError = cudaGetLastError();
fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status, cuError,cudaGetErrorString(cuError));
return -1 ;
}
#endif
// Check whether the triangular-matrix inversion succeeded
if( bDebug )
{
cudaMemcpy( h_infoArray, d_infoArray, sizeBatch*sizeof(int), cudaMemcpyDeviceToHost );
for(int i=0;i<sizeBatch;i++)
{
if( h_infoArray[i] == 0 )
{
//fprintf(stderr, "%d-th matrix lu-decompose succeeded!\n", i );
continue;
}
else if (h_infoArray[i] > 0)
{
fprintf(stderr, "%d-th matrix lu-decompose failed, U(%d,%d) = 0!\n", i, h_infoArray[i], h_infoArray[i] );
continue;
}
}
}
// timer end
cudaError_t cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess)
{
fprintf(stderr, "!!!! GPU program execution error on cudaDeviceSynchronize : cudaError=%d,(%s)\n", cudaStatus,cudaGetErrorString(cudaStatus));
return -1;
}
sdkStopTimer(&timer);
printf("Processing time: %f (ms)\n", sdkGetTimerValue(&timer));
sdkDeleteTimer(&timer);
// Copy the inverse matrices back from device to host, gpu -> cpu
for(int i=0; i< sizeBatch; i++)
{
cudaMemcpy( C[i], devPtrC[i], matrixSize * sizeof(T_ELEM) , cudaMemcpyDeviceToHost );
}
// Verify the computed inverse matrices
if( bDebug )
{
int bStatus = 0;
for(int i=0; i< sizeBatch; i++)
{
#if 0
bStatus = verifyResultBLAS( devPtrA[i], devPtrC[i], matrixRows );
#else
bStatus = verifyResult( A[i], C[i], matrixRows );
#endif
if( bStatus )
{
printf( "Matrix Inverse Wrong! A*A^(-1) [%d,%d] !=1 \n" ,bStatus ,bStatus );
break;
}
}
}
// Free the temporary memory used during the computation
for(int i = 0; i < sizeBatch; ++i)
{
if(devPtrA[i]) cudaFree(devPtrA[i]);
if(devPtrC[i]) cudaFree(devPtrC[i]);
}
if (devPtrA) free(devPtrA);
if (devPtrC) free(devPtrC);
if (devPtrA_dev) cudaFree(devPtrA_dev);
if (devPtrC_dev) cudaFree(devPtrC_dev);
if (d_pivotArray) cudaFree(d_pivotArray);
if (d_infoArray) cudaFree(d_infoArray);
if (h_infoArray) free(h_infoArray);
cublasDestroy(handle);
return 0;
}
// Invert a single matrix via the cuBLAS library, C = A ^ -1
// inverse a matrix
int inverseMatrixBLAS( T_ELEM *A , T_ELEM *C, int matrixRows, int bDebug )
{
// Initialize matrix A, input matrix
T_ELEM **ABatch = (T_ELEM **)malloc( 1 * sizeof(T_ELEM*));
*ABatch = A;
// matrix C, output inverse matrix of A
T_ELEM **CBatch = (T_ELEM **)malloc( 1 * sizeof(T_ELEM*));
*CBatch = C;
inverseMatrixBLAS( ABatch, CBatch, matrixRows, 1, bDebug ) ;
if (ABatch) free (ABatch);
if (CBatch) free (CBatch);
return 0;
}
|
5729b9ce46dbc9666812b7e0b0ec4e884110dc18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
// any 2 <= mod <= 2^31 should work
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
unsigned c = a+b;
return c >= mod ? c-mod : c;
}
// each block solves a case
// block size must be power of 2 and <= 2^(n-1)
// thread size must be n - 1
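// Counts (directed) Hamiltonian cycles modulo `mod` by inclusion-exclusion over subsets s of
// the first n-1 vertices: for each subset, the threads cooperatively count closed walks of
// length n through vertex n-1 whose other vertices lie in s (repeated adjacency-matrix-vector
// steps over the shared qc[] vector), accumulating the counts with sign (-1)^(n-1-|s|).
// Each block writes its partial total to ret[blockIdx.x]; sum_all then reduces those.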
__global__ void my_hamilton(int n, int *adj, int *ret, unsigned int mod) {
__shared__ unsigned qc[31];
__shared__ unsigned a_n_1[31]; // adj[n-1][i]
int tid = threadIdx.x;
int lv = 31 - __clz(gridDim.x);
unsigned s = blockIdx.x; // case as a bit field
unsigned total = 0;
// prefetch
unsigned a_i = 0;
for (int i = 0; i < n; i++) {
a_i = a_i | adj[tid*n + i]<<i;
}
a_n_1[tid] = adj[(n-1)*n + tid];
// test each case in this block
for (unsigned stride = 0; stride < 1U<<(n-1-lv); stride++, s += 1U<<lv) {
// active means this thread is selected
unsigned active = s>>tid & 1;
// first transition
qc[tid] = active * (a_i>>(n-1) & 1);
unsigned row = active * a_i;
__syncthreads();
// calculate each transition, uses GPU SIMD feature
for (int t = 1; t < n-1; t++) {
unsigned sum = 0;
for (int i = 0; i < n-1; i++) {
sum = mod_sum(sum, qc[i] * (row>>i & 1), mod);
}
__syncthreads();
qc[tid] = sum;
}
// last transition
unsigned count = 0;
for (int i = 0; i < n-1; i++) {
count = mod_sum(count, qc[i] * a_n_1[i], mod);
}
// adjust sign for inclusion-exclusion principle
int sign = (n - __popc(s)) & 1;
unsigned count_with_sign = sign ? count : (count ? mod-count : 0);
total = mod_sum(total, count_with_sign, mod);
}
if (tid == 0) {
// output total for this block
ret[blockIdx.x] = total;
}
}
// thread size must be >= 64 and power of 2
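// Modular tree reduction: each block sums a grid-strided slice of data[] in shared memory,
// halving the number of active threads each step, and writes its partial result to
// sum[blockIdx.x]; the host folds the per-block partials into the final answer.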
__global__ void sum_all(int n, int *data, int *sum, unsigned mod) {
__shared__ int tmp_sum[1024];
int blockSize = blockDim.x;
int stride = gridDim.x * blockSize;
int id = threadIdx.x;
int i = id + blockSize * blockIdx.x;
// sum part of data
tmp_sum[id] = 0;
while (i < n) {
tmp_sum[id] = mod_sum(tmp_sum[id], data[i], mod);
i += stride;
}
__syncthreads();
// merge threads
if (blockSize >= 1024) {
if (id < 512) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 512], mod);
__syncthreads();
}
if (blockSize >= 512) {
if (id < 256) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 256], mod);
__syncthreads();
}
if (blockSize >= 256) {
if (id < 128) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 128], mod);
__syncthreads();
}
if (blockSize >= 128) {
if (id < 64) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 64], mod);
__syncthreads();
}
if (id < 32) {
// now, only 1 warp is active
volatile int *tmp = tmp_sum;
tmp[id] = mod_sum(tmp[id], tmp[id + 32], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 16], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 8], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 4], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 2], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 1], mod);
}
// write back to global memory
if (id == 0) {
sum[blockIdx.x] = tmp_sum[0];
}
}
int n, a[32*32], sum[1<<7];
int main(int argc, char *argv[]) {
if (scanf("%d", &n) != 1) return 1;
if (n < 3 || n > 32) return 1;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int aij = 1; scanf("%d", &aij);
if (i == j) a[i*n+j] = 0;
else a[i*n+j] = aij;
}
}
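// Launch at most 2^MAX_BLOCK_LV blocks (one per starting subset); for larger n each block
// iterates over the remaining 2^(n-1)/block_size subsets inside my_hamilton's stride loop.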
int block_size = 1;
const int MAX_BLOCK_LV = 16;
if (n <= MAX_BLOCK_LV) block_size = 1<<(n-1);
else block_size = 1<<MAX_BLOCK_LV;
int sum_size = 128;
int *gpu_a, *gpu_ans, *gpu_sum;
hipMalloc(&gpu_a, sizeof a);
hipMalloc(&gpu_ans, sizeof(int) * block_size); // only resides in GPU!
hipMalloc(&gpu_sum, sizeof(int) * sum_size);
hipMemcpy(gpu_a, a, sizeof a, hipMemcpyHostToDevice);
for (int i = 1; i < argc; i++) {
unsigned mod = 0;
sscanf(argv[i], "%u", &mod);
hipLaunchKernelGGL(( my_hamilton), dim3(block_size), dim3(n-1), 0, 0, n, gpu_a, gpu_ans, mod);
hipLaunchKernelGGL(( sum_all), dim3(sum_size), dim3(256), 0, 0, block_size, gpu_ans, gpu_sum, mod);
hipDeviceSynchronize();
hipMemcpy(sum, gpu_sum, sizeof(int) * sum_size, hipMemcpyDeviceToHost);
unsigned ans = 0;
for (int j = 0; j < sum_size; j++) ans = mod_sum(ans, sum[j], mod);
printf("%u\n", ans);
}
}
|
5729b9ce46dbc9666812b7e0b0ec4e884110dc18.cu
|
#include<stdio.h>
// any 2 <= mod <= 2^31 should work
__host__ __device__ unsigned mod_sum(unsigned a, unsigned b, unsigned mod) {
unsigned c = a+b;
return c >= mod ? c-mod : c;
}
// each block solves a case
// block size must be power of 2 and <= 2^(n-1)
// thread size must be n - 1
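// Counts (directed) Hamiltonian cycles modulo `mod` by inclusion-exclusion over subsets s of
// the first n-1 vertices: for each subset, the threads cooperatively count closed walks of
// length n through vertex n-1 whose other vertices lie in s (repeated adjacency-matrix-vector
// steps over the shared qc[] vector), accumulating the counts with sign (-1)^(n-1-|s|).
// Each block writes its partial total to ret[blockIdx.x]; sum_all then reduces those.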
__global__ void my_hamilton(int n, int *adj, int *ret, unsigned int mod) {
__shared__ unsigned qc[31];
__shared__ unsigned a_n_1[31]; // adj[n-1][i]
int tid = threadIdx.x;
int lv = 31 - __clz(gridDim.x);
unsigned s = blockIdx.x; // case as a bit field
unsigned total = 0;
// prefetch
unsigned a_i = 0;
for (int i = 0; i < n; i++) {
a_i = a_i | adj[tid*n + i]<<i;
}
a_n_1[tid] = adj[(n-1)*n + tid];
// test each case in this block
for (unsigned stride = 0; stride < 1U<<(n-1-lv); stride++, s += 1U<<lv) {
// active means this thread is selected
unsigned active = s>>tid & 1;
// first transition
qc[tid] = active * (a_i>>(n-1) & 1);
unsigned row = active * a_i;
__syncthreads();
// calculate each transition, uses GPU SIMD feature
for (int t = 1; t < n-1; t++) {
unsigned sum = 0;
for (int i = 0; i < n-1; i++) {
sum = mod_sum(sum, qc[i] * (row>>i & 1), mod);
}
__syncthreads();
qc[tid] = sum;
}
// last transition
unsigned count = 0;
for (int i = 0; i < n-1; i++) {
count = mod_sum(count, qc[i] * a_n_1[i], mod);
}
// adjust sign for inclusion-exclusion principle
int sign = (n - __popc(s)) & 1;
unsigned count_with_sign = sign ? count : (count ? mod-count : 0);
total = mod_sum(total, count_with_sign, mod);
}
if (tid == 0) {
// output total for this block
ret[blockIdx.x] = total;
}
}
// thread size must be >= 64 and power of 2
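// Modular tree reduction: each block sums a grid-strided slice of data[] in shared memory,
// halving the number of active threads each step, and writes its partial result to
// sum[blockIdx.x]; the host folds the per-block partials into the final answer.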
__global__ void sum_all(int n, int *data, int *sum, unsigned mod) {
__shared__ int tmp_sum[1024];
int blockSize = blockDim.x;
int stride = gridDim.x * blockSize;
int id = threadIdx.x;
int i = id + blockSize * blockIdx.x;
// sum part of data
tmp_sum[id] = 0;
while (i < n) {
tmp_sum[id] = mod_sum(tmp_sum[id], data[i], mod);
i += stride;
}
__syncthreads();
// merge threads
if (blockSize >= 1024) {
if (id < 512) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 512], mod);
__syncthreads();
}
if (blockSize >= 512) {
if (id < 256) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 256], mod);
__syncthreads();
}
if (blockSize >= 256) {
if (id < 128) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 128], mod);
__syncthreads();
}
if (blockSize >= 128) {
if (id < 64) tmp_sum[id] = mod_sum(tmp_sum[id], tmp_sum[id + 64], mod);
__syncthreads();
}
if (id < 32) {
// now, only 1 warp is active
volatile int *tmp = tmp_sum;
tmp[id] = mod_sum(tmp[id], tmp[id + 32], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 16], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 8], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 4], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 2], mod);
tmp[id] = mod_sum(tmp[id], tmp[id + 1], mod);
}
// write back to global memory
if (id == 0) {
sum[blockIdx.x] = tmp_sum[0];
}
}
int n, a[32*32], sum[1<<7];
int main(int argc, char *argv[]) {
if (scanf("%d", &n) != 1) return 1;
if (n < 3 || n > 32) return 1;
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
int aij = 1; scanf("%d", &aij);
if (i == j) a[i*n+j] = 0;
else a[i*n+j] = aij;
}
}
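// Launch at most 2^MAX_BLOCK_LV blocks (one per starting subset); for larger n each block
// iterates over the remaining 2^(n-1)/block_size subsets inside my_hamilton's stride loop.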
int block_size = 1;
const int MAX_BLOCK_LV = 16;
if (n <= MAX_BLOCK_LV) block_size = 1<<(n-1);
else block_size = 1<<MAX_BLOCK_LV;
int sum_size = 128;
int *gpu_a, *gpu_ans, *gpu_sum;
cudaMalloc(&gpu_a, sizeof a);
cudaMalloc(&gpu_ans, sizeof(int) * block_size); // only resides in GPU!
cudaMalloc(&gpu_sum, sizeof(int) * sum_size);
cudaMemcpy(gpu_a, a, sizeof a, cudaMemcpyHostToDevice);
for (int i = 1; i < argc; i++) {
unsigned mod = 0;
sscanf(argv[i], "%u", &mod);
my_hamilton<<<block_size, n-1>>>(n, gpu_a, gpu_ans, mod);
sum_all<<<sum_size, 256>>>(block_size, gpu_ans, gpu_sum, mod);
cudaDeviceSynchronize();
cudaMemcpy(sum, gpu_sum, sizeof(int) * sum_size, cudaMemcpyDeviceToHost);
unsigned ans = 0;
for (int j = 0; j < sum_size; j++) ans = mod_sum(ans, sum[j], mod);
printf("%u\n", ans);
}
}
|
3820f687b7f8ba468b77e74008d832df71a3c8d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <unittest/unittest.h>
#include <thrust/fill.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/discard_iterator.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestFillSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
thrust::fill(v.begin() + 1, v.begin() + 4, (T) 7);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 7);
ASSERT_EQUAL(v[2], 7);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
thrust::fill(v.begin() + 0, v.begin() + 3, (T) 8);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
thrust::fill(v.begin() + 2, v.end(), (T) 9);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 9);
ASSERT_EQUAL(v[3], 9);
ASSERT_EQUAL(v[4], 9);
thrust::fill(v.begin(), v.end(), (T) 1);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 1);
ASSERT_EQUAL(v[3], 1);
ASSERT_EQUAL(v[4], 1);
}
DECLARE_VECTOR_UNITTEST(TestFillSimple);
void TestFillDiscardIterator(void)
{
// there's no result to check because fill returns void
thrust::fill(thrust::discard_iterator<thrust::host_space_tag>(),
thrust::discard_iterator<thrust::host_space_tag>(10),
13);
thrust::fill(thrust::discard_iterator<thrust::device_space_tag>(),
thrust::discard_iterator<thrust::device_space_tag>(10),
13);
}
DECLARE_UNITTEST(TestFillDiscardIterator);
template <class Vector>
void TestFillMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(4);
thrust::fill(v.begin(), v.end(), (long) 10);
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 10);
ASSERT_EQUAL(v[2], 10);
ASSERT_EQUAL(v[3], 10);
thrust::fill(v.begin(), v.end(), (float) 20);
ASSERT_EQUAL(v[0], 20);
ASSERT_EQUAL(v[1], 20);
ASSERT_EQUAL(v[2], 20);
ASSERT_EQUAL(v[3], 20);
}
DECLARE_VECTOR_UNITTEST(TestFillMixedTypes);
template <typename T>
void TestFill(size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::fill(h_data.begin() + ::min((size_t)1, n), h_data.begin() + ::min((size_t)3, n), (T) 0);
thrust::fill(d_data.begin() + ::min((size_t)1, n), d_data.begin() + ::min((size_t)3, n), (T) 0);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + ::min((size_t)117, n), h_data.begin() + ::min((size_t)367, n), (T) 1);
thrust::fill(d_data.begin() + ::min((size_t)117, n), d_data.begin() + ::min((size_t)367, n), (T) 1);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + ::min((size_t)8, n), h_data.begin() + ::min((size_t)259, n), (T) 2);
thrust::fill(d_data.begin() + ::min((size_t)8, n), d_data.begin() + ::min((size_t)259, n), (T) 2);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + ::min((size_t)3, n), h_data.end(), (T) 3);
thrust::fill(d_data.begin() + ::min((size_t)3, n), d_data.end(), (T) 3);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin(), h_data.end(), (T) 4);
thrust::fill(d_data.begin(), d_data.end(), (T) 4);
ASSERT_EQUAL(h_data, d_data);
}
DECLARE_VARIABLE_UNITTEST(TestFill);
template <class Vector>
void TestFillNSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
typename Vector::iterator iter = thrust::fill_n(v.begin() + 1, 3, (T) 7);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 7);
ASSERT_EQUAL(v[2], 7);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v.begin() + 4, iter);
iter = thrust::fill_n(v.begin() + 0, 3, (T) 8);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v.begin() + 3, iter);
iter = thrust::fill_n(v.begin() + 2, 3, (T) 9);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 9);
ASSERT_EQUAL(v[3], 9);
ASSERT_EQUAL(v[4], 9);
ASSERT_EQUAL_QUIET(v.end(), iter);
iter = thrust::fill_n(v.begin(), v.size(), (T) 1);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 1);
ASSERT_EQUAL(v[3], 1);
ASSERT_EQUAL(v[4], 1);
ASSERT_EQUAL_QUIET(v.end(), iter);
}
DECLARE_VECTOR_UNITTEST(TestFillNSimple);
void TestFillNDiscardIterator(void)
{
thrust::discard_iterator<thrust::host_space_tag> h_result =
thrust::fill_n(thrust::discard_iterator<thrust::host_space_tag>(),
10,
13);
thrust::discard_iterator<thrust::device_space_tag> d_result =
thrust::fill_n(thrust::discard_iterator<thrust::device_space_tag>(),
10,
13);
thrust::discard_iterator<> reference(10);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_UNITTEST(TestFillNDiscardIterator);
template <class Vector>
void TestFillNMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(4);
typename Vector::iterator iter = thrust::fill_n(v.begin(), v.size(), (long) 10);
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 10);
ASSERT_EQUAL(v[2], 10);
ASSERT_EQUAL(v[3], 10);
ASSERT_EQUAL_QUIET(v.end(), iter);
iter = thrust::fill_n(v.begin(), v.size(), (float) 20);
ASSERT_EQUAL(v[0], 20);
ASSERT_EQUAL(v[1], 20);
ASSERT_EQUAL(v[2], 20);
ASSERT_EQUAL(v[3], 20);
ASSERT_EQUAL_QUIET(v.end(), iter);
}
DECLARE_VECTOR_UNITTEST(TestFillNMixedTypes);
template <typename T>
void TestFillN(size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
size_t begin_offset = std::min<size_t>(1,n);
thrust::fill_n(h_data.begin() + begin_offset, ::min((size_t)3, n) - begin_offset, (T) 0);
thrust::fill_n(d_data.begin() + begin_offset, ::min((size_t)3, n) - begin_offset, (T) 0);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(117, n);
thrust::fill_n(h_data.begin() + begin_offset, ::min((size_t)367, n) - begin_offset, (T) 1);
thrust::fill_n(d_data.begin() + begin_offset, ::min((size_t)367, n) - begin_offset, (T) 1);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(8, n);
thrust::fill_n(h_data.begin() + begin_offset, ::min((size_t)259, n) - begin_offset, (T) 2);
thrust::fill_n(d_data.begin() + begin_offset, ::min((size_t)259, n) - begin_offset, (T) 2);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(3, n);
thrust::fill_n(h_data.begin() + begin_offset, h_data.size() - begin_offset, (T) 3);
thrust::fill_n(d_data.begin() + begin_offset, d_data.size() - begin_offset, (T) 3);
ASSERT_EQUAL(h_data, d_data);
thrust::fill_n(h_data.begin(), h_data.size(), (T) 4);
thrust::fill_n(d_data.begin(), d_data.size(), (T) 4);
ASSERT_EQUAL(h_data, d_data);
}
DECLARE_VARIABLE_UNITTEST(TestFillN);
template <typename Vector>
void TestFillZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3,T(0));
Vector v2(3,T(0));
Vector v3(3,T(0));
thrust::fill(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin(),v3.begin())),
thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end(),v3.end())),
thrust::tuple<T,T,T>(4,7,13));
ASSERT_EQUAL(4, v1[0]);
ASSERT_EQUAL(4, v1[1]);
ASSERT_EQUAL(4, v1[2]);
ASSERT_EQUAL(7, v2[0]);
ASSERT_EQUAL(7, v2[1]);
ASSERT_EQUAL(7, v2[2]);
ASSERT_EQUAL(13, v3[0]);
ASSERT_EQUAL(13, v3[1]);
ASSERT_EQUAL(13, v3[2]);
};
DECLARE_VECTOR_UNITTEST(TestFillZipIterator);
void TestFillTuple(void)
{
typedef int T;
typedef thrust::tuple<T,T> Tuple;
thrust::host_vector<Tuple> h(3, Tuple(0,0));
thrust::device_vector<Tuple> d(3, Tuple(0,0));
thrust::fill(h.begin(), h.end(), Tuple(4,7));
thrust::fill(d.begin(), d.end(), Tuple(4,7));
ASSERT_EQUAL_QUIET(h, d);
};
DECLARE_UNITTEST(TestFillTuple);
struct TypeWithTrivialAssigment
{
int x, y, z;
};
void TestFillWithTrivialAssignment(void)
{
typedef TypeWithTrivialAssigment T;
thrust::host_vector<T> h(1);
thrust::device_vector<T> d(1);
ASSERT_EQUAL(h[0].x, 0);
ASSERT_EQUAL(h[0].y, 0);
ASSERT_EQUAL(h[0].z, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 0);
T val;
val.x = 10;
val.y = 20;
val.z = -1;
thrust::fill(h.begin(), h.end(), val);
thrust::fill(d.begin(), d.end(), val);
ASSERT_EQUAL(h[0].x, 10);
ASSERT_EQUAL(h[0].y, 20);
ASSERT_EQUAL(h[0].z, -1);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 10);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 20);
ASSERT_EQUAL(static_cast<T>(d[0]).z, -1);
};
DECLARE_UNITTEST(TestFillWithTrivialAssignment);
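// The assignment operator below deliberately derives z from x and y instead of copying it,
// so the tests can detect whether thrust::fill invoked the user-defined (non-trivial)
// assignment rather than performing a bitwise copy.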
struct TypeWithNonTrivialAssigment
{
int x, y, z;
__host__ __device__
TypeWithNonTrivialAssigment() : x(0), y(0), z(0) {}
__host__ __device__
TypeWithNonTrivialAssigment& operator=(const TypeWithNonTrivialAssigment& t)
{
x = t.x;
y = t.y;
z = t.x + t.y;
return *this;
}
__host__ __device__
bool operator==(const TypeWithNonTrivialAssigment& t) const
{
return x == t.x && y == t.y && z == t.z;
}
};
void TestFillWithNonTrivialAssignment(void)
{
typedef TypeWithNonTrivialAssigment T;
thrust::host_vector<T> h(1);
thrust::device_vector<T> d(1);
ASSERT_EQUAL(h[0].x, 0);
ASSERT_EQUAL(h[0].y, 0);
ASSERT_EQUAL(h[0].z, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 0);
T val;
val.x = 10;
val.y = 20;
val.z = -1;
thrust::fill(h.begin(), h.end(), val);
thrust::fill(d.begin(), d.end(), val);
ASSERT_EQUAL(h[0].x, 10);
ASSERT_EQUAL(h[0].y, 20);
ASSERT_EQUAL(h[0].z, 30);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 10);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 20);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 30);
};
DECLARE_UNITTEST(TestFillWithNonTrivialAssignment);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
3820f687b7f8ba468b77e74008d832df71a3c8d5.cu
|
#include <unittest/unittest.h>
#include <thrust/fill.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/iterator/discard_iterator.h>
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_BEGIN
template <class Vector>
void TestFillSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
thrust::fill(v.begin() + 1, v.begin() + 4, (T) 7);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 7);
ASSERT_EQUAL(v[2], 7);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
thrust::fill(v.begin() + 0, v.begin() + 3, (T) 8);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
thrust::fill(v.begin() + 2, v.end(), (T) 9);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 9);
ASSERT_EQUAL(v[3], 9);
ASSERT_EQUAL(v[4], 9);
thrust::fill(v.begin(), v.end(), (T) 1);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 1);
ASSERT_EQUAL(v[3], 1);
ASSERT_EQUAL(v[4], 1);
}
DECLARE_VECTOR_UNITTEST(TestFillSimple);
void TestFillDiscardIterator(void)
{
// there's no result to check because fill returns void
thrust::fill(thrust::discard_iterator<thrust::host_space_tag>(),
thrust::discard_iterator<thrust::host_space_tag>(10),
13);
thrust::fill(thrust::discard_iterator<thrust::device_space_tag>(),
thrust::discard_iterator<thrust::device_space_tag>(10),
13);
}
DECLARE_UNITTEST(TestFillDiscardIterator);
template <class Vector>
void TestFillMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(4);
thrust::fill(v.begin(), v.end(), (long) 10);
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 10);
ASSERT_EQUAL(v[2], 10);
ASSERT_EQUAL(v[3], 10);
thrust::fill(v.begin(), v.end(), (float) 20);
ASSERT_EQUAL(v[0], 20);
ASSERT_EQUAL(v[1], 20);
ASSERT_EQUAL(v[2], 20);
ASSERT_EQUAL(v[3], 20);
}
DECLARE_VECTOR_UNITTEST(TestFillMixedTypes);
template <typename T>
void TestFill(size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
thrust::fill(h_data.begin() + std::min((size_t)1, n), h_data.begin() + std::min((size_t)3, n), (T) 0);
thrust::fill(d_data.begin() + std::min((size_t)1, n), d_data.begin() + std::min((size_t)3, n), (T) 0);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + std::min((size_t)117, n), h_data.begin() + std::min((size_t)367, n), (T) 1);
thrust::fill(d_data.begin() + std::min((size_t)117, n), d_data.begin() + std::min((size_t)367, n), (T) 1);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + std::min((size_t)8, n), h_data.begin() + std::min((size_t)259, n), (T) 2);
thrust::fill(d_data.begin() + std::min((size_t)8, n), d_data.begin() + std::min((size_t)259, n), (T) 2);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin() + std::min((size_t)3, n), h_data.end(), (T) 3);
thrust::fill(d_data.begin() + std::min((size_t)3, n), d_data.end(), (T) 3);
ASSERT_EQUAL(h_data, d_data);
thrust::fill(h_data.begin(), h_data.end(), (T) 4);
thrust::fill(d_data.begin(), d_data.end(), (T) 4);
ASSERT_EQUAL(h_data, d_data);
}
DECLARE_VARIABLE_UNITTEST(TestFill);
template <class Vector>
void TestFillNSimple(void)
{
typedef typename Vector::value_type T;
Vector v(5);
v[0] = 0; v[1] = 1; v[2] = 2; v[3] = 3; v[4] = 4;
typename Vector::iterator iter = thrust::fill_n(v.begin() + 1, 3, (T) 7);
ASSERT_EQUAL(v[0], 0);
ASSERT_EQUAL(v[1], 7);
ASSERT_EQUAL(v[2], 7);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v.begin() + 4, iter);
iter = thrust::fill_n(v.begin() + 0, 3, (T) 8);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 8);
ASSERT_EQUAL(v[3], 7);
ASSERT_EQUAL(v[4], 4);
ASSERT_EQUAL_QUIET(v.begin() + 3, iter);
iter = thrust::fill_n(v.begin() + 2, 3, (T) 9);
ASSERT_EQUAL(v[0], 8);
ASSERT_EQUAL(v[1], 8);
ASSERT_EQUAL(v[2], 9);
ASSERT_EQUAL(v[3], 9);
ASSERT_EQUAL(v[4], 9);
ASSERT_EQUAL_QUIET(v.end(), iter);
iter = thrust::fill_n(v.begin(), v.size(), (T) 1);
ASSERT_EQUAL(v[0], 1);
ASSERT_EQUAL(v[1], 1);
ASSERT_EQUAL(v[2], 1);
ASSERT_EQUAL(v[3], 1);
ASSERT_EQUAL(v[4], 1);
ASSERT_EQUAL_QUIET(v.end(), iter);
}
DECLARE_VECTOR_UNITTEST(TestFillNSimple);
void TestFillNDiscardIterator(void)
{
thrust::discard_iterator<thrust::host_space_tag> h_result =
thrust::fill_n(thrust::discard_iterator<thrust::host_space_tag>(),
10,
13);
thrust::discard_iterator<thrust::device_space_tag> d_result =
thrust::fill_n(thrust::discard_iterator<thrust::device_space_tag>(),
10,
13);
thrust::discard_iterator<> reference(10);
ASSERT_EQUAL_QUIET(reference, h_result);
ASSERT_EQUAL_QUIET(reference, d_result);
}
DECLARE_UNITTEST(TestFillNDiscardIterator);
template <class Vector>
void TestFillNMixedTypes(void)
{
typedef typename Vector::value_type T;
Vector v(4);
typename Vector::iterator iter = thrust::fill_n(v.begin(), v.size(), (long) 10);
ASSERT_EQUAL(v[0], 10);
ASSERT_EQUAL(v[1], 10);
ASSERT_EQUAL(v[2], 10);
ASSERT_EQUAL(v[3], 10);
ASSERT_EQUAL_QUIET(v.end(), iter);
iter = thrust::fill_n(v.begin(), v.size(), (float) 20);
ASSERT_EQUAL(v[0], 20);
ASSERT_EQUAL(v[1], 20);
ASSERT_EQUAL(v[2], 20);
ASSERT_EQUAL(v[3], 20);
ASSERT_EQUAL_QUIET(v.end(), iter);
}
DECLARE_VECTOR_UNITTEST(TestFillNMixedTypes);
template <typename T>
void TestFillN(size_t n)
{
thrust::host_vector<T> h_data = unittest::random_integers<T>(n);
thrust::device_vector<T> d_data = h_data;
size_t begin_offset = std::min<size_t>(1,n);
thrust::fill_n(h_data.begin() + begin_offset, std::min((size_t)3, n) - begin_offset, (T) 0);
thrust::fill_n(d_data.begin() + begin_offset, std::min((size_t)3, n) - begin_offset, (T) 0);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(117, n);
thrust::fill_n(h_data.begin() + begin_offset, std::min((size_t)367, n) - begin_offset, (T) 1);
thrust::fill_n(d_data.begin() + begin_offset, std::min((size_t)367, n) - begin_offset, (T) 1);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(8, n);
thrust::fill_n(h_data.begin() + begin_offset, std::min((size_t)259, n) - begin_offset, (T) 2);
thrust::fill_n(d_data.begin() + begin_offset, std::min((size_t)259, n) - begin_offset, (T) 2);
ASSERT_EQUAL(h_data, d_data);
begin_offset = std::min<size_t>(3, n);
thrust::fill_n(h_data.begin() + begin_offset, h_data.size() - begin_offset, (T) 3);
thrust::fill_n(d_data.begin() + begin_offset, d_data.size() - begin_offset, (T) 3);
ASSERT_EQUAL(h_data, d_data);
thrust::fill_n(h_data.begin(), h_data.size(), (T) 4);
thrust::fill_n(d_data.begin(), d_data.size(), (T) 4);
ASSERT_EQUAL(h_data, d_data);
}
DECLARE_VARIABLE_UNITTEST(TestFillN);
template <typename Vector>
void TestFillZipIterator(void)
{
typedef typename Vector::value_type T;
Vector v1(3,T(0));
Vector v2(3,T(0));
Vector v3(3,T(0));
thrust::fill(thrust::make_zip_iterator(thrust::make_tuple(v1.begin(),v2.begin(),v3.begin())),
thrust::make_zip_iterator(thrust::make_tuple(v1.end(),v2.end(),v3.end())),
thrust::tuple<T,T,T>(4,7,13));
ASSERT_EQUAL(4, v1[0]);
ASSERT_EQUAL(4, v1[1]);
ASSERT_EQUAL(4, v1[2]);
ASSERT_EQUAL(7, v2[0]);
ASSERT_EQUAL(7, v2[1]);
ASSERT_EQUAL(7, v2[2]);
ASSERT_EQUAL(13, v3[0]);
ASSERT_EQUAL(13, v3[1]);
ASSERT_EQUAL(13, v3[2]);
};
DECLARE_VECTOR_UNITTEST(TestFillZipIterator);
void TestFillTuple(void)
{
typedef int T;
typedef thrust::tuple<T,T> Tuple;
thrust::host_vector<Tuple> h(3, Tuple(0,0));
thrust::device_vector<Tuple> d(3, Tuple(0,0));
thrust::fill(h.begin(), h.end(), Tuple(4,7));
thrust::fill(d.begin(), d.end(), Tuple(4,7));
ASSERT_EQUAL_QUIET(h, d);
};
DECLARE_UNITTEST(TestFillTuple);
struct TypeWithTrivialAssigment
{
int x, y, z;
};
void TestFillWithTrivialAssignment(void)
{
typedef TypeWithTrivialAssigment T;
thrust::host_vector<T> h(1);
thrust::device_vector<T> d(1);
ASSERT_EQUAL(h[0].x, 0);
ASSERT_EQUAL(h[0].y, 0);
ASSERT_EQUAL(h[0].z, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 0);
T val;
val.x = 10;
val.y = 20;
val.z = -1;
thrust::fill(h.begin(), h.end(), val);
thrust::fill(d.begin(), d.end(), val);
ASSERT_EQUAL(h[0].x, 10);
ASSERT_EQUAL(h[0].y, 20);
ASSERT_EQUAL(h[0].z, -1);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 10);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 20);
ASSERT_EQUAL(static_cast<T>(d[0]).z, -1);
};
DECLARE_UNITTEST(TestFillWithTrivialAssignment);
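// The assignment operator below deliberately derives z from x and y instead of copying it,
// so the tests can detect whether thrust::fill invoked the user-defined (non-trivial)
// assignment rather than performing a bitwise copy.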
struct TypeWithNonTrivialAssigment
{
int x, y, z;
__host__ __device__
TypeWithNonTrivialAssigment() : x(0), y(0), z(0) {}
__host__ __device__
TypeWithNonTrivialAssigment& operator=(const TypeWithNonTrivialAssigment& t)
{
x = t.x;
y = t.y;
z = t.x + t.y;
return *this;
}
__host__ __device__
bool operator==(const TypeWithNonTrivialAssigment& t) const
{
return x == t.x && y == t.y && z == t.z;
}
};
void TestFillWithNonTrivialAssignment(void)
{
typedef TypeWithNonTrivialAssigment T;
thrust::host_vector<T> h(1);
thrust::device_vector<T> d(1);
ASSERT_EQUAL(h[0].x, 0);
ASSERT_EQUAL(h[0].y, 0);
ASSERT_EQUAL(h[0].z, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 0);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 0);
T val;
val.x = 10;
val.y = 20;
val.z = -1;
thrust::fill(h.begin(), h.end(), val);
thrust::fill(d.begin(), d.end(), val);
ASSERT_EQUAL(h[0].x, 10);
ASSERT_EQUAL(h[0].y, 20);
ASSERT_EQUAL(h[0].z, 30);
ASSERT_EQUAL(static_cast<T>(d[0]).x, 10);
ASSERT_EQUAL(static_cast<T>(d[0]).y, 20);
ASSERT_EQUAL(static_cast<T>(d[0]).z, 30);
};
DECLARE_UNITTEST(TestFillWithNonTrivialAssignment);
__THRUST_DISABLE_MSVC_POSSIBLE_LOSS_OF_DATA_WARNING_END
|
0e234a49ea03255e4e708694af8c4056aa4ab77b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <hipcub/hipcub.hpp>
#include <cub/block/block_run_length_decode.cuh>
#include <cub/block/block_store.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <hipcub/hipcub.hpp>
// Has to go after all cub headers. Otherwise, this test won't catch unused
// variables in cub kernels.
#include "catch2_test_helper.h"
/******************************************************************************
* HELPER CLASS FOR RUN-LENGTH DECODING TESTS
******************************************************************************/
/**
* \brief Class template to facilitate testing the BlockRunLengthDecode algorithm for all its
* template parameter specialisations.
*
* \tparam ItemItT The item type being run-length decoded
* \tparam RunLengthsItT Iterator type providing the runs' lengths
* \tparam RUNS_PER_THREAD The number of runs that each thread is getting assigned to
* \tparam DECODED_ITEMS_PER_THREAD The number of run-length decoded items that each thread is decoding
* \tparam TEST_RELATIVE_OFFSETS_ Whether to also retrieve each decoded item's relative offset within its run
* \tparam TEST_RUN_OFFSETS_ Whether to pass in each run's offset instead of each run's length
* \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam BLOCK_DIM_Y The thread block length in threads along the Y dimension
* \tparam BLOCK_DIM_Z The thread block length in threads along the Z dimension
*/
template <typename ItemItT,
typename RunLengthsItT,
int RUNS_PER_THREAD,
int DECODED_ITEMS_PER_THREAD,
bool TEST_RELATIVE_OFFSETS_,
bool TEST_RUN_OFFSETS_,
int BLOCK_DIM_X,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1>
class AgentTestBlockRunLengthDecode
{
public:
constexpr static uint32_t BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr static uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * BLOCK_THREADS;
constexpr static bool TEST_RELATIVE_OFFSETS = TEST_RELATIVE_OFFSETS_;
private:
using RunItemT = cub::detail::value_t<ItemItT>;
using RunLengthT = cub::detail::value_t<RunLengthsItT>;
using BlockRunOffsetScanT =
hipcub::BlockScan<RunLengthT, BLOCK_DIM_X, cub::BLOCK_SCAN_RAKING, BLOCK_DIM_Y, BLOCK_DIM_Z>;
using BlockRunLengthDecodeT =
cub::BlockRunLengthDecode<RunItemT, BLOCK_DIM_X, RUNS_PER_THREAD, DECODED_ITEMS_PER_THREAD>;
using BlockLoadRunItemT = cub::BlockLoad<RunItemT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
cub::BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockLoadRunLengthsT = cub::BlockLoad<RunLengthT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
cub::BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockStoreDecodedItemT = cub::BlockStore<RunItemT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
cub::BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockStoreRelativeOffsetT = cub::BlockStore<RunLengthT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
cub::BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
__device__ __forceinline__ BlockRunLengthDecodeT
InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<true> /*test_run_offsets*/)
{
RunLengthT run_offsets[RUNS_PER_THREAD];
BlockRunOffsetScanT(temp_storage.run_offsets_scan_storage)
.ExclusiveSum(run_lengths, run_offsets, decoded_size);
// Ensure temporary shared memory can be repurposed
cub::CTA_SYNC();
// Construct BlockRunLengthDecode and initialize with the run offsets
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage,
unique_items,
run_offsets);
}
__device__ __forceinline__ BlockRunLengthDecodeT
InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<false> /*test_run_offsets*/)
{
// Construct BlockRunLengthDecode and initialize with the run lengths
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage,
unique_items,
run_lengths,
decoded_size);
}
__device__ __forceinline__ void LoadRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
size_t num_valid_items)
{
if (num_valid_items < RUNS_PER_BLOCK)
{
BlockLoadRunItemT(temp_storage.load_uniques_storage)
.Load(d_block_unique_items, unique_items, num_valid_items);
}
else
{
BlockLoadRunItemT(temp_storage.load_uniques_storage).Load(d_block_unique_items, unique_items);
}
// Ensure BlockLoad's temporary shared memory can be repurposed
cub::CTA_SYNC();
// Load this block's tile of run lengths
if (num_valid_items < RUNS_PER_BLOCK)
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage)
.Load(d_block_run_lengths, run_lengths, num_valid_items, static_cast<RunLengthT>(0));
else
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage)
.Load(d_block_run_lengths, run_lengths);
// Ensure temporary shared memory can be repurposed
cub::CTA_SYNC();
}
public:
union TempStorage
{
typename BlockLoadRunItemT::TempStorage load_uniques_storage;
typename BlockLoadRunLengthsT::TempStorage load_run_lengths_storage;
cub::detail::
conditional_t<TEST_RUN_OFFSETS_, typename BlockRunOffsetScanT::TempStorage, cub::NullType>
run_offsets_scan_storage;
struct
{
typename BlockRunLengthDecodeT::TempStorage run_length_decode_storage;
typename BlockStoreDecodedItemT::TempStorage store_decoded_runs_storage;
typename BlockStoreRelativeOffsetT::TempStorage store_relative_offsets;
} decode;
};
TempStorage &temp_storage;
__device__ __forceinline__ AgentTestBlockRunLengthDecode(TempStorage &temp_storage)
: temp_storage(temp_storage)
{}
/**
* \brief Loads the given block (or tile) of runs, and computes their "decompressed" (run-length
* decoded) size.
*/
__device__ __forceinline__ uint32_t GetDecodedSize(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the
// "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items,
run_lengths,
decoded_size,
cub::Int2Type<TEST_RUN_OFFSETS_>());
return decoded_size;
}
/**
* \brief Loads the given block (or tile) of runs, run-length decodes them, and writes the results
* to \p d_block_decoded_out.
*/
template <typename UniqueItemOutItT, typename RelativeOffsetOutItT>
__device__ __forceinline__ uint32_t WriteDecodedRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
UniqueItemOutItT d_block_decoded_out,
RelativeOffsetOutItT d_block_rel_out,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the
// "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items,
run_lengths,
decoded_size,
cub::Int2Type<TEST_RUN_OFFSETS_>());
// Run-length decode ("decompress") the runs into a window buffer of limited size. This is
// repeated until all runs have been decoded.
uint32_t decoded_window_offset = 0U;
while (decoded_window_offset < decoded_size)
{
RunLengthT relative_offsets[DECODED_ITEMS_PER_THREAD];
RunItemT decoded_items[DECODED_ITEMS_PER_THREAD];
// The number of decoded items that are valid within this window (aka pass) of run-length
// decoding
uint32_t num_valid_items = decoded_size - decoded_window_offset;
run_length_decode.RunLengthDecode(decoded_items, relative_offsets, decoded_window_offset);
BlockStoreDecodedItemT(temp_storage.decode.store_decoded_runs_storage)
.Store(d_block_decoded_out + decoded_window_offset, decoded_items, num_valid_items);
if (TEST_RELATIVE_OFFSETS)
{
BlockStoreRelativeOffsetT(temp_storage.decode.store_relative_offsets)
.Store(d_block_rel_out + decoded_window_offset, relative_offsets, num_valid_items);
}
decoded_window_offset += DECODED_ITEMS_PER_THREAD * BLOCK_THREADS;
}
return decoded_size;
}
};
/******************************************************************************
* [STAGE 1] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename OffsetT,
typename DecodedSizesOutT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeGetSizeKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const OffsetT num_runs,
DecodedSizesOutT d_decoded_sizes)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset)
: RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
uint64_t num_decoded_items = run_length_decode_agent.GetDecodedSize(d_unique_items + block_offset,
d_run_lengths + block_offset,
num_valid_runs);
d_decoded_sizes[blockIdx.x] = num_decoded_items;
}
/******************************************************************************
* [STAGE 2] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename DecodedSizesOutT,
typename OffsetT,
typename DecodedItemsOutItT,
typename RelativeOffsetOutItT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeTestKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const DecodedSizesOutT d_decoded_offsets,
const OffsetT num_runs,
DecodedItemsOutItT d_decoded_items,
RelativeOffsetOutItT d_relative_offsets)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset)
: RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
run_length_decode_agent.WriteDecodedRuns(d_unique_items + block_offset,
d_run_lengths + block_offset,
d_decoded_items + d_decoded_offsets[blockIdx.x],
d_relative_offsets + d_decoded_offsets[blockIdx.x],
num_valid_runs);
}
struct ModOp
{
using T = uint32_t;
__host__ __device__ __forceinline__ T operator()(const T &x) const { return 1 + (x % 100); }
};
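// Test flow: BlockRunLengthDecodeGetSizeKernel computes each block's decoded size,
// DeviceScan::InclusiveSum turns those sizes into per-block output offsets, and
// BlockRunLengthDecodeTestKernel then decodes each block's runs into its slot of the output,
// which is finally compared against a host-side reference decode.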
template <uint32_t RUNS_PER_THREAD,
uint32_t DECODED_ITEMS_PER_THREAD,
uint32_t BLOCK_DIM_X,
uint32_t BLOCK_DIM_Y,
uint32_t BLOCK_DIM_Z,
bool TEST_RUN_OFFSETS,
bool TEST_RELATIVE_OFFSETS>
void TestAlgorithmSpecialisation()
{
constexpr uint32_t THREADS_PER_BLOCK = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * THREADS_PER_BLOCK;
using RunItemT = float;
using RunLengthT = uint32_t;
using ItemItT = hipcub::CountingInputIterator<RunItemT>;
using RunLengthsItT =
hipcub::TransformInputIterator<RunLengthT, ModOp, hipcub::CountingInputIterator<RunLengthT>>;
ItemItT d_unique_items(1000U);
RunLengthsItT d_run_lengths(hipcub::CountingInputIterator<RunLengthT>(0), ModOp{});
constexpr uint32_t num_runs = 10000;
constexpr uint32_t num_blocks = (num_runs + (RUNS_PER_BLOCK - 1U)) / RUNS_PER_BLOCK;
size_t temp_storage_bytes = 0ULL;
void *temp_storage = nullptr;
uint32_t *h_num_decoded_total = nullptr;
uint32_t *d_decoded_sizes = nullptr;
uint32_t *d_decoded_offsets = nullptr;
RunItemT *d_decoded_out = nullptr;
RunLengthT *d_relative_offsets = nullptr;
RunItemT *h_decoded_out = nullptr;
RunLengthT *h_relative_offsets = nullptr;
using AgentTestBlockRunLengthDecodeT = AgentTestBlockRunLengthDecode<ItemItT,
RunLengthsItT,
RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
TEST_RELATIVE_OFFSETS,
TEST_RUN_OFFSETS,
THREADS_PER_BLOCK,
1,
1>;
enum : uint32_t
{
TIMER_SIZE_BEGIN = 0,
TIMER_SIZE_END,
TIMER_DECODE_BEGIN,
TIMER_DECODE_END,
NUM_TIMERS,
};
// Get temporary storage requirements for the scan (for computing offsets for the per-block
// run-length decoded items)
hipcub::DeviceScan::InclusiveSum(nullptr,
temp_storage_bytes,
d_decoded_sizes,
d_decoded_offsets,
num_blocks);
// Allocate device memory
CubDebugExit(hipMalloc(&temp_storage, temp_storage_bytes));
CubDebugExit(hipMalloc(&d_decoded_sizes, num_blocks * sizeof(*d_decoded_sizes)));
// Allocate for the exclusive sum PLUS the overall aggregate
CubDebugExit(hipMalloc(&d_decoded_offsets, (num_blocks + 1) * sizeof(*d_decoded_offsets)));
CubDebugExit(hipHostMalloc(&h_num_decoded_total, sizeof(*h_num_decoded_total)));
// Get the per-block number of items being decoded (i-th thread block writing size to
// d_decoded_sizes[i])
hipLaunchKernelGGL(( BlockRunLengthDecodeGetSizeKernel<AgentTestBlockRunLengthDecodeT>)
, dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0U, 0, d_unique_items,
d_run_lengths,
num_runs,
d_decoded_sizes);
// Compute offsets for the runs decoded by each block (exclusive sum + aggregate)
CubDebugExit(hipMemsetAsync(d_decoded_offsets, 0, sizeof(d_decoded_offsets[0])));
CubDebugExit(hipcub::DeviceScan::InclusiveSum(temp_storage,
temp_storage_bytes,
d_decoded_sizes,
&d_decoded_offsets[1],
num_blocks));
// Copy the total decoded size to CPU in order to allocate just the right amount of device memory
CubDebugExit(hipMemcpy(h_num_decoded_total,
&d_decoded_offsets[num_blocks],
sizeof(*h_num_decoded_total),
hipMemcpyDeviceToHost));
// Allocate device memory for the run-length decoded output
CubDebugExit(hipHostMalloc(&h_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
CubDebugExit(hipMalloc(&d_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(hipMalloc(&d_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
CubDebugExit(hipHostMalloc(&h_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
}
// Perform the block-wise run-length decoding (each block taking its offset from
// d_decoded_offsets)
hipLaunchKernelGGL(( BlockRunLengthDecodeTestKernel<AgentTestBlockRunLengthDecodeT>)
, dim3(num_blocks), dim3(THREADS_PER_BLOCK), 0U, 0, d_unique_items,
d_run_lengths,
d_decoded_offsets,
num_runs,
d_decoded_out,
d_relative_offsets);
// Copy back results for verification
CubDebugExit(hipMemcpy(h_decoded_out,
d_decoded_out,
(*h_num_decoded_total) * sizeof(*h_decoded_out),
hipMemcpyDeviceToHost));
if (TEST_RELATIVE_OFFSETS)
{
// Copy back the relative offsets
CubDebugExit(hipMemcpy(h_relative_offsets,
d_relative_offsets,
(*h_num_decoded_total) * sizeof(*h_relative_offsets),
hipMemcpyDeviceToHost));
}
// Generate host-side run-length decoded data for verification
std::vector<std::pair<RunItemT, RunLengthT>> host_golden;
host_golden.reserve(*h_num_decoded_total);
for (uint32_t run = 0; run < num_runs; run++)
{
for (RunLengthT i = 0; i < d_run_lengths[run]; i++)
{
host_golden.push_back({d_unique_items[run], i});
}
}
// Verify the total run-length decoded size is correct
REQUIRE(host_golden.size() == h_num_decoded_total[0]);
// Verify the run-length decoded data is correct
bool cmp_eq = true;
for (uint32_t i = 0; i < host_golden.size(); i++)
{
if (host_golden[i].first != h_decoded_out[i])
{
FAIL("Mismatch at #" << i << ": CPU item: " << host_golden[i].first
<< ", GPU: " << h_decoded_out[i] << "\n");
cmp_eq = false;
}
if (TEST_RELATIVE_OFFSETS)
{
if (host_golden[i].second != h_relative_offsets[i])
{
FAIL("Mismatch of relative offset at #" << i
<< ": CPU item: " << host_golden[i].first << ", GPU: " << h_decoded_out[i]
<< "; relative offsets: CPU: " << host_golden[i].second
<< ", GPU: " << h_relative_offsets[i] << "\n");
cmp_eq = false;
break;
}
}
}
REQUIRE(cmp_eq == true);
// Clean up memory allocations
CubDebugExit(hipFree(temp_storage));
CubDebugExit(hipFree(d_decoded_sizes));
CubDebugExit(hipFree(d_decoded_offsets));
CubDebugExit(hipFree(d_decoded_out));
CubDebugExit(hipHostFree(h_num_decoded_total));
CubDebugExit(hipHostFree(h_decoded_out));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(hipFree(d_relative_offsets));
CubDebugExit(hipHostFree(h_relative_offsets));
}
}
constexpr bool DO_TEST_RELATIVE_OFFSETS = true;
constexpr bool DO_NOT_TEST_RELATIVE_OFFSETS = false;
constexpr bool TEST_WITH_RUN_OFFSETS = true;
constexpr bool TEST_WITH_RUN_LENGTHS = false;
template <int RunsPerThread,
int DecodedItemsPerThread,
int BlockDimX,
int BlockDimY = 1,
int BlockDimZ = 1>
struct params_t
{
static constexpr int runs_per_thread = RunsPerThread;
static constexpr int decoded_items_per_thread = DecodedItemsPerThread;
static constexpr int block_dim_x = BlockDimX;
static constexpr int block_dim_y = BlockDimY;
static constexpr int block_dim_z = BlockDimZ;
};
CUB_TEST_LIST("Block Run Length Decode works with run lengths and offsets relative to each run",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_LENGTHS,
DO_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run lengths and performs normal run-length "
"decoding",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_LENGTHS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run offsets and generates offsets relative to "
"each run",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_OFFSETS,
DO_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run offsets and performs normal run-length "
"decoding",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_OFFSETS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
}
|
0e234a49ea03255e4e708694af8c4056aa4ab77b.cu
|
/******************************************************************************
* Copyright (c) 2011-2022, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
#include <cub/block/block_load.cuh>
#include <cub/block/block_run_length_decode.cuh>
#include <cub/block/block_store.cuh>
#include <cub/device/device_scan.cuh>
#include <cub/iterator/counting_input_iterator.cuh>
#include <cub/iterator/transform_input_iterator.cuh>
#include <cub/util_allocator.cuh>
// Has to go after all cub headers. Otherwise, this test won't catch unused
// variables in cub kernels.
#include "catch2_test_helper.h"
/******************************************************************************
* HELPER CLASS FOR RUN-LENGTH DECODING TESTS
******************************************************************************/
/**
* \brief Class template to facilitate testing the BlockRunLengthDecode algorithm for all its
* template parameter specialisations.
*
* \tparam ItemItT The item type being run-length decoded
* \tparam RunLengthsItT Iterator type providing the runs' lengths
* \tparam RUNS_PER_THREAD The number of runs that each thread is getting assigned to
 * \tparam DECODED_ITEMS_PER_THREAD The number of run-length decoded items that each thread is
 * decoding
 * \tparam TEST_RELATIVE_OFFSETS_ Whether to also retrieve each decoded item's relative offset
 * within its run
 * \tparam TEST_RUN_OFFSETS_ Whether to pass in each run's offset instead of each run's length
 * \tparam BLOCK_DIM_X The thread block length in threads along the X dimension
* \tparam BLOCK_DIM_Y The thread block length in threads along the Y dimension
* \tparam BLOCK_DIM_Z The thread block length in threads along the Z dimension
*/
template <typename ItemItT,
typename RunLengthsItT,
int RUNS_PER_THREAD,
int DECODED_ITEMS_PER_THREAD,
bool TEST_RELATIVE_OFFSETS_,
bool TEST_RUN_OFFSETS_,
int BLOCK_DIM_X,
int BLOCK_DIM_Y = 1,
int BLOCK_DIM_Z = 1>
class AgentTestBlockRunLengthDecode
{
public:
constexpr static uint32_t BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr static uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * BLOCK_THREADS;
constexpr static bool TEST_RELATIVE_OFFSETS = TEST_RELATIVE_OFFSETS_;
private:
using RunItemT = cub::detail::value_t<ItemItT>;
using RunLengthT = cub::detail::value_t<RunLengthsItT>;
using BlockRunOffsetScanT =
cub::BlockScan<RunLengthT, BLOCK_DIM_X, cub::BLOCK_SCAN_RAKING, BLOCK_DIM_Y, BLOCK_DIM_Z>;
using BlockRunLengthDecodeT =
cub::BlockRunLengthDecode<RunItemT, BLOCK_DIM_X, RUNS_PER_THREAD, DECODED_ITEMS_PER_THREAD>;
using BlockLoadRunItemT = cub::BlockLoad<RunItemT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
cub::BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockLoadRunLengthsT = cub::BlockLoad<RunLengthT,
BLOCK_DIM_X,
RUNS_PER_THREAD,
cub::BLOCK_LOAD_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockStoreDecodedItemT = cub::BlockStore<RunItemT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
cub::BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
using BlockStoreRelativeOffsetT = cub::BlockStore<RunLengthT,
BLOCK_DIM_X,
DECODED_ITEMS_PER_THREAD,
cub::BLOCK_STORE_WARP_TRANSPOSE,
BLOCK_DIM_Y,
BLOCK_DIM_Z>;
__device__ __forceinline__ BlockRunLengthDecodeT
InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<true> /*test_run_offsets*/)
{
RunLengthT run_offsets[RUNS_PER_THREAD];
BlockRunOffsetScanT(temp_storage.run_offsets_scan_storage)
.ExclusiveSum(run_lengths, run_offsets, decoded_size);
// Ensure temporary shared memory can be repurposed
cub::CTA_SYNC();
// Construct BlockRunLengthDecode and initialize with the run offsets
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage,
unique_items,
run_offsets);
}
__device__ __forceinline__ BlockRunLengthDecodeT
InitBlockRunLengthDecode(RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
RunLengthT &decoded_size,
cub::Int2Type<false> /*test_run_offsets*/)
{
// Construct BlockRunLengthDecode and initialize with the run lengths
return BlockRunLengthDecodeT(temp_storage.decode.run_length_decode_storage,
unique_items,
run_lengths,
decoded_size);
}
__device__ __forceinline__ void LoadRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
RunItemT (&unique_items)[RUNS_PER_THREAD],
RunLengthT (&run_lengths)[RUNS_PER_THREAD],
size_t num_valid_items)
{
if (num_valid_items < RUNS_PER_BLOCK)
{
BlockLoadRunItemT(temp_storage.load_uniques_storage)
.Load(d_block_unique_items, unique_items, num_valid_items);
}
else
{
BlockLoadRunItemT(temp_storage.load_uniques_storage).Load(d_block_unique_items, unique_items);
}
// Ensure BlockLoad's temporary shared memory can be repurposed
cub::CTA_SYNC();
// Load this block's tile of run lengths
if (num_valid_items < RUNS_PER_BLOCK)
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage)
.Load(d_block_run_lengths, run_lengths, num_valid_items, static_cast<RunLengthT>(0));
else
BlockLoadRunLengthsT(temp_storage.load_run_lengths_storage)
.Load(d_block_run_lengths, run_lengths);
// Ensure temporary shared memory can be repurposed
cub::CTA_SYNC();
}
public:
union TempStorage
{
typename BlockLoadRunItemT::TempStorage load_uniques_storage;
typename BlockLoadRunLengthsT::TempStorage load_run_lengths_storage;
cub::detail::
conditional_t<TEST_RUN_OFFSETS_, typename BlockRunOffsetScanT::TempStorage, cub::NullType>
run_offsets_scan_storage;
struct
{
typename BlockRunLengthDecodeT::TempStorage run_length_decode_storage;
typename BlockStoreDecodedItemT::TempStorage store_decoded_runs_storage;
typename BlockStoreRelativeOffsetT::TempStorage store_relative_offsets;
} decode;
};
TempStorage &temp_storage;
__device__ __forceinline__ AgentTestBlockRunLengthDecode(TempStorage &temp_storage)
: temp_storage(temp_storage)
{}
/**
* \brief Loads the given block (or tile) of runs, and computes their "decompressed" (run-length
* decoded) size.
*/
__device__ __forceinline__ uint32_t GetDecodedSize(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the
// "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items,
run_lengths,
decoded_size,
cub::Int2Type<TEST_RUN_OFFSETS_>());
return decoded_size;
}
/**
* \brief Loads the given block (or tile) of runs, run-length decodes them, and writes the results
* to \p d_block_decoded_out.
*/
template <typename UniqueItemOutItT, typename RelativeOffsetOutItT>
__device__ __forceinline__ uint32_t WriteDecodedRuns(ItemItT d_block_unique_items,
RunLengthsItT d_block_run_lengths,
UniqueItemOutItT d_block_decoded_out,
RelativeOffsetOutItT d_block_rel_out,
size_t num_valid_runs)
{
// Load this block's tile of encoded runs
RunItemT unique_items[RUNS_PER_THREAD];
RunLengthT run_lengths[RUNS_PER_THREAD];
LoadRuns(d_block_unique_items, d_block_run_lengths, unique_items, run_lengths, num_valid_runs);
// Init the BlockRunLengthDecode and get the total decoded size of this block's tile (i.e., the
// "decompressed" size)
uint32_t decoded_size = 0U;
BlockRunLengthDecodeT run_length_decode =
InitBlockRunLengthDecode(unique_items,
run_lengths,
decoded_size,
cub::Int2Type<TEST_RUN_OFFSETS_>());
// Run-length decode ("decompress") the runs into a window buffer of limited size. This is
// repeated until all runs have been decoded.
uint32_t decoded_window_offset = 0U;
while (decoded_window_offset < decoded_size)
{
RunLengthT relative_offsets[DECODED_ITEMS_PER_THREAD];
RunItemT decoded_items[DECODED_ITEMS_PER_THREAD];
// The number of decoded items that are valid within this window (aka pass) of run-length
// decoding
uint32_t num_valid_items = decoded_size - decoded_window_offset;
run_length_decode.RunLengthDecode(decoded_items, relative_offsets, decoded_window_offset);
BlockStoreDecodedItemT(temp_storage.decode.store_decoded_runs_storage)
.Store(d_block_decoded_out + decoded_window_offset, decoded_items, num_valid_items);
if (TEST_RELATIVE_OFFSETS)
{
BlockStoreRelativeOffsetT(temp_storage.decode.store_relative_offsets)
.Store(d_block_rel_out + decoded_window_offset, relative_offsets, num_valid_items);
}
decoded_window_offset += DECODED_ITEMS_PER_THREAD * BLOCK_THREADS;
}
return decoded_size;
}
};
/******************************************************************************
* [STAGE 1] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename OffsetT,
typename DecodedSizesOutT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeGetSizeKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const OffsetT num_runs,
DecodedSizesOutT d_decoded_sizes)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset)
: RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
uint64_t num_decoded_items = run_length_decode_agent.GetDecodedSize(d_unique_items + block_offset,
d_run_lengths + block_offset,
num_valid_runs);
d_decoded_sizes[blockIdx.x] = num_decoded_items;
}
/******************************************************************************
* [STAGE 2] RUN-LENGTH DECODING TEST KERNEL
******************************************************************************/
template <typename AgentTestBlockRunLengthDecode,
typename ItemItT,
typename RunLengthsItT,
typename DecodedSizesOutT,
typename OffsetT,
typename DecodedItemsOutItT,
typename RelativeOffsetOutItT>
__launch_bounds__(AgentTestBlockRunLengthDecode::BLOCK_THREADS) __global__
void BlockRunLengthDecodeTestKernel(const ItemItT d_unique_items,
const RunLengthsItT d_run_lengths,
const DecodedSizesOutT d_decoded_offsets,
const OffsetT num_runs,
DecodedItemsOutItT d_decoded_items,
RelativeOffsetOutItT d_relative_offsets)
{
constexpr OffsetT RUNS_PER_BLOCK = AgentTestBlockRunLengthDecode::RUNS_PER_BLOCK;
__shared__ typename AgentTestBlockRunLengthDecode::TempStorage temp_storage;
OffsetT block_offset = blockIdx.x * RUNS_PER_BLOCK;
OffsetT num_valid_runs = (block_offset + RUNS_PER_BLOCK >= num_runs) ? (num_runs - block_offset)
: RUNS_PER_BLOCK;
AgentTestBlockRunLengthDecode run_length_decode_agent(temp_storage);
run_length_decode_agent.WriteDecodedRuns(d_unique_items + block_offset,
d_run_lengths + block_offset,
d_decoded_items + d_decoded_offsets[blockIdx.x],
d_relative_offsets + d_decoded_offsets[blockIdx.x],
num_valid_runs);
}
struct ModOp
{
using T = uint32_t;
__host__ __device__ __forceinline__ T operator()(const T &x) const { return 1 + (x % 100); }
};
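// Note: composed with a cub::CountingInputIterator<uint32_t> starting at 0 (see RunLengthsItT in
// TestAlgorithmSpecialisation below), ModOp produces the cyclic run-length sequence
// 1, 2, ..., 100, 1, 2, ... on the fly, so no run-length buffer has to be materialised in memory.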
template <uint32_t RUNS_PER_THREAD,
uint32_t DECODED_ITEMS_PER_THREAD,
uint32_t BLOCK_DIM_X,
uint32_t BLOCK_DIM_Y,
uint32_t BLOCK_DIM_Z,
bool TEST_RUN_OFFSETS,
bool TEST_RELATIVE_OFFSETS>
void TestAlgorithmSpecialisation()
{
constexpr uint32_t THREADS_PER_BLOCK = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
constexpr uint32_t RUNS_PER_BLOCK = RUNS_PER_THREAD * THREADS_PER_BLOCK;
using RunItemT = float;
using RunLengthT = uint32_t;
using ItemItT = cub::CountingInputIterator<RunItemT>;
using RunLengthsItT =
cub::TransformInputIterator<RunLengthT, ModOp, cub::CountingInputIterator<RunLengthT>>;
ItemItT d_unique_items(1000U);
RunLengthsItT d_run_lengths(cub::CountingInputIterator<RunLengthT>(0), ModOp{});
constexpr uint32_t num_runs = 10000;
constexpr uint32_t num_blocks = (num_runs + (RUNS_PER_BLOCK - 1U)) / RUNS_PER_BLOCK;
size_t temp_storage_bytes = 0ULL;
void *temp_storage = nullptr;
uint32_t *h_num_decoded_total = nullptr;
uint32_t *d_decoded_sizes = nullptr;
uint32_t *d_decoded_offsets = nullptr;
RunItemT *d_decoded_out = nullptr;
RunLengthT *d_relative_offsets = nullptr;
RunItemT *h_decoded_out = nullptr;
RunLengthT *h_relative_offsets = nullptr;
using AgentTestBlockRunLengthDecodeT = AgentTestBlockRunLengthDecode<ItemItT,
RunLengthsItT,
RUNS_PER_THREAD,
DECODED_ITEMS_PER_THREAD,
TEST_RELATIVE_OFFSETS,
TEST_RUN_OFFSETS,
THREADS_PER_BLOCK,
1,
1>;
enum : uint32_t
{
TIMER_SIZE_BEGIN = 0,
TIMER_SIZE_END,
TIMER_DECODE_BEGIN,
TIMER_DECODE_END,
NUM_TIMERS,
};
// Get temporary storage requirements for the scan (for computing offsets for the per-block
// run-length decoded items)
cub::DeviceScan::InclusiveSum(nullptr,
temp_storage_bytes,
d_decoded_sizes,
d_decoded_offsets,
num_blocks);
// Allocate device memory
CubDebugExit(cudaMalloc(&temp_storage, temp_storage_bytes));
CubDebugExit(cudaMalloc(&d_decoded_sizes, num_blocks * sizeof(*d_decoded_sizes)));
// Allocate for the exclusive sum PLUS the overall aggregate
CubDebugExit(cudaMalloc(&d_decoded_offsets, (num_blocks + 1) * sizeof(*d_decoded_offsets)));
CubDebugExit(cudaMallocHost(&h_num_decoded_total, sizeof(*h_num_decoded_total)));
// Get the per-block number of items being decoded (i-th thread block writing size to
// d_decoded_sizes[i])
BlockRunLengthDecodeGetSizeKernel<AgentTestBlockRunLengthDecodeT>
<<<num_blocks, THREADS_PER_BLOCK, 0U>>>(d_unique_items,
d_run_lengths,
num_runs,
d_decoded_sizes);
// Compute offsets for the runs decoded by each block (exclusive sum + aggregate)
CubDebugExit(cudaMemsetAsync(d_decoded_offsets, 0, sizeof(d_decoded_offsets[0])));
CubDebugExit(cub::DeviceScan::InclusiveSum(temp_storage,
temp_storage_bytes,
d_decoded_sizes,
&d_decoded_offsets[1],
num_blocks));
// Copy the total decoded size to CPU in order to allocate just the right amount of device memory
CubDebugExit(cudaMemcpy(h_num_decoded_total,
&d_decoded_offsets[num_blocks],
sizeof(*h_num_decoded_total),
cudaMemcpyDeviceToHost));
  // Allocate host (pinned) and device memory for the run-length decoded output
CubDebugExit(cudaMallocHost(&h_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
CubDebugExit(cudaMalloc(&d_decoded_out, (*h_num_decoded_total) * sizeof(RunItemT)));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(cudaMalloc(&d_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
CubDebugExit(cudaMallocHost(&h_relative_offsets, (*h_num_decoded_total) * sizeof(RunLengthT)));
}
// Perform the block-wise run-length decoding (each block taking its offset from
// d_decoded_offsets)
BlockRunLengthDecodeTestKernel<AgentTestBlockRunLengthDecodeT>
<<<num_blocks, THREADS_PER_BLOCK, 0U>>>(d_unique_items,
d_run_lengths,
d_decoded_offsets,
num_runs,
d_decoded_out,
d_relative_offsets);
// Copy back results for verification
CubDebugExit(cudaMemcpy(h_decoded_out,
d_decoded_out,
(*h_num_decoded_total) * sizeof(*h_decoded_out),
cudaMemcpyDeviceToHost));
if (TEST_RELATIVE_OFFSETS)
{
// Copy back the relative offsets
CubDebugExit(cudaMemcpy(h_relative_offsets,
d_relative_offsets,
(*h_num_decoded_total) * sizeof(*h_relative_offsets),
cudaMemcpyDeviceToHost));
}
// Generate host-side run-length decoded data for verification
std::vector<std::pair<RunItemT, RunLengthT>> host_golden;
host_golden.reserve(*h_num_decoded_total);
for (uint32_t run = 0; run < num_runs; run++)
{
for (RunLengthT i = 0; i < d_run_lengths[run]; i++)
{
host_golden.push_back({d_unique_items[run], i});
}
}
// Verify the total run-length decoded size is correct
REQUIRE(host_golden.size() == h_num_decoded_total[0]);
// Verify the run-length decoded data is correct
bool cmp_eq = true;
for (uint32_t i = 0; i < host_golden.size(); i++)
{
if (host_golden[i].first != h_decoded_out[i])
{
FAIL("Mismatch at #" << i << ": CPU item: " << host_golden[i].first
<< ", GPU: " << h_decoded_out[i] << "\n");
cmp_eq = false;
}
if (TEST_RELATIVE_OFFSETS)
{
if (host_golden[i].second != h_relative_offsets[i])
{
FAIL("Mismatch of relative offset at #" << i
<< ": CPU item: " << host_golden[i].first << ", GPU: " << h_decoded_out[i]
<< "; relative offsets: CPU: " << host_golden[i].second
<< ", GPU: " << h_relative_offsets[i] << "\n");
cmp_eq = false;
break;
}
}
}
REQUIRE(cmp_eq == true);
// Clean up memory allocations
CubDebugExit(cudaFree(temp_storage));
CubDebugExit(cudaFree(d_decoded_sizes));
CubDebugExit(cudaFree(d_decoded_offsets));
CubDebugExit(cudaFree(d_decoded_out));
CubDebugExit(cudaFreeHost(h_num_decoded_total));
CubDebugExit(cudaFreeHost(h_decoded_out));
if (TEST_RELATIVE_OFFSETS)
{
CubDebugExit(cudaFree(d_relative_offsets));
CubDebugExit(cudaFreeHost(h_relative_offsets));
}
}
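// The kernel below is an illustrative sketch only and is not used by the tests above: it shows
// the bare cub::BlockRunLengthDecode pattern that AgentTestBlockRunLengthDecode wraps, for a
// single tile that fits into one thread block and without the BlockLoad/BlockStore machinery.
// It uses the RunLengthDecode overload without relative offsets. The kernel name and the launch
// parameters (e.g. MinimalRunLengthDecodeKernel<128, 1, 4><<<1, 128>>>(...)) are introduced here
// for illustration and are not part of the test suite.
template <int BLOCK_THREADS, int RUNS_PER_THREAD, int DECODED_ITEMS_PER_THREAD>
__global__ void MinimalRunLengthDecodeKernel(const float *d_unique_items,
                                             const uint32_t *d_run_lengths,
                                             float *d_decoded_out)
{
  using BlockRunLengthDecodeT =
    cub::BlockRunLengthDecode<float, BLOCK_THREADS, RUNS_PER_THREAD, DECODED_ITEMS_PER_THREAD>;
  __shared__ typename BlockRunLengthDecodeT::TempStorage temp_storage;

  // Each thread picks up its runs in a blocked arrangement
  float unique_items[RUNS_PER_THREAD];
  uint32_t run_lengths[RUNS_PER_THREAD];
  for (int i = 0; i < RUNS_PER_THREAD; ++i)
  {
    unique_items[i] = d_unique_items[threadIdx.x * RUNS_PER_THREAD + i];
    run_lengths[i]  = d_run_lengths[threadIdx.x * RUNS_PER_THREAD + i];
  }

  // Initialise from run lengths; total_decoded_size receives the tile's decoded ("decompressed") size
  uint32_t total_decoded_size = 0;
  BlockRunLengthDecodeT block_rld(temp_storage, unique_items, run_lengths, total_decoded_size);

  // Decode the tile window by window, as WriteDecodedRuns() above does with BlockStore
  uint32_t decoded_window_offset = 0;
  while (decoded_window_offset < total_decoded_size)
  {
    float decoded_items[DECODED_ITEMS_PER_THREAD];
    block_rld.RunLengthDecode(decoded_items, decoded_window_offset);

    // Decoded items come back in a blocked arrangement; guard the tail of the last window
    for (int i = 0; i < DECODED_ITEMS_PER_THREAD; ++i)
    {
      uint32_t item_offset = decoded_window_offset + threadIdx.x * DECODED_ITEMS_PER_THREAD + i;
      if (item_offset < total_decoded_size)
      {
        d_decoded_out[item_offset] = decoded_items[i];
      }
    }
    decoded_window_offset += BLOCK_THREADS * DECODED_ITEMS_PER_THREAD;
  }
}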
constexpr bool DO_TEST_RELATIVE_OFFSETS = true;
constexpr bool DO_NOT_TEST_RELATIVE_OFFSETS = false;
constexpr bool TEST_WITH_RUN_OFFSETS = true;
constexpr bool TEST_WITH_RUN_LENGTHS = false;
template <int RunsPerThread,
int DecodedItemsPerThread,
int BlockDimX,
int BlockDimY = 1,
int BlockDimZ = 1>
struct params_t
{
static constexpr int runs_per_thread = RunsPerThread;
static constexpr int decoded_items_per_thread = DecodedItemsPerThread;
static constexpr int block_dim_x = BlockDimX;
static constexpr int block_dim_y = BlockDimY;
static constexpr int block_dim_z = BlockDimZ;
};
CUB_TEST_LIST("Block Run Length Decode works with run lengths and offsets relative to each run",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_LENGTHS,
DO_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run lengths and performs normal run-length "
"decoding",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_LENGTHS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run offsets and generates offsets relative to "
"each run",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_OFFSETS,
DO_TEST_RELATIVE_OFFSETS>();
}
CUB_TEST_LIST("Block Run Length Decode works with run offsets and performs normal run-length "
"decoding",
"[rld][block]",
params_t<1, 1, 64>,
params_t<1, 3, 32, 2, 3>,
params_t<1, 1, 128>,
params_t<1, 8, 128>,
params_t<3, 1, 256>,
params_t<1, 8, 256>,
params_t<8, 1, 256>,
params_t<1, 1, 256>,
params_t<2, 2, 384>)
{
using params = TestType;
TestAlgorithmSpecialisation<params::runs_per_thread,
params::decoded_items_per_thread,
params::block_dim_x,
params::block_dim_y,
params::block_dim_z,
TEST_WITH_RUN_OFFSETS,
DO_NOT_TEST_RELATIVE_OFFSETS>();
}
|
1746fe93205a56a294aeeb2fdd42025734006d55.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample queries the properties of the CUDA devices present in the system
* via CUDA Runtime API. */
// std::system includes
#include <iostream>
#include <memory>
#include "helper_cuda.h"
#include <string>
#if CUDART_VERSION < 5000
// CUDA-C includes
#include <hip/hip_runtime.h>
// This function wraps the CUDA Driver API into a template function
template <class T>
inline void getCudaAttribute(T *attribute, hipDeviceAttribute_t device_attribute,
int device) {
hipError_t error = hipDeviceGetAttribute(attribute, device_attribute, device);
if (hipSuccess != error) {
fprintf(
stderr,
"cuSafeCallNoSync() Driver API error = %04d from file <%s>, line %i.\n",
error, __FILE__, __LINE__);
//exit(EXIT_FAILURE);
//system("pause");
}
}
#endif /* CUDART_VERSION < 5000 */
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
void deviceQuery() {
printf(" Starting...\n\n");
printf(
" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");
int deviceCount = 0;
hipError_t error_id = hipGetDeviceCount(&deviceCount);
if (error_id != hipSuccess) {
printf("hipGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), hipGetErrorString(error_id));
printf("Result = FAIL\n");
//exit(EXIT_FAILURE);
//system("pause");
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev, driverVersion = 0, runtimeVersion = 0;
for (dev = 0; dev < deviceCount; ++dev) {
hipSetDevice(dev);
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
// Console log
hipDriverGetVersion(&driverVersion);
hipRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
driverVersion / 1000, (driverVersion % 100) / 10,
runtimeVersion / 1000, (runtimeVersion % 100) / 10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
char msg[256];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(msg, sizeof(msg),
" Total amount of global memory: %.0f MBytes "
"(%llu bytes)\n",
static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
(unsigned long long)deviceProp.totalGlobalMem);
#else
snprintf(msg, sizeof(msg),
" Total amount of global memory: %.0f MBytes "
"(%llu bytes)\n",
static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
(unsigned long long)deviceProp.totalGlobalMem);
#endif
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) *
deviceProp.multiProcessorCount);
printf(
" GPU Max Clock rate: %.0f MHz (%0.2f "
"GHz)\n",
deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 5000
// This is supported in CUDA 5.0 (runtime API device properties)
printf(" Memory Clock rate: %.0f Mhz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize) {
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
#else
// This only available in CUDA 4.0-4.2 (but these were only exposed in the
// CUDA Driver API)
int memoryClock;
getCudaAttribute<int>(&memoryClock, hipDeviceAttributeMemoryClockRate,
dev);
printf(" Memory Clock rate: %.0f Mhz\n",
memoryClock * 1e-3f);
int memBusWidth;
getCudaAttribute<int>(&memBusWidth,
hipDeviceAttributeMemoryBusWidth, dev);
printf(" Memory Bus Width: %d-bit\n",
memBusWidth);
int L2CacheSize;
getCudaAttribute<int>(&L2CacheSize, hipDeviceAttributeL2CacheSize, dev);
if (L2CacheSize) {
printf(" L2 Cache Size: %d bytes\n",
L2CacheSize);
}
#endif
printf(
" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, "
"%d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(
" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
printf(
" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d "
"layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %zu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %zu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %zu bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %zu bytes\n",
deviceProp.textureAlignment);
printf(
" Concurrent copy and kernel execution: %s with %d copy "
"engine(s)\n",
(deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
printf(" Run time limit on kernels: %s\n",
deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n",
deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n",
deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n",
deviceProp.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support: %s\n",
deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n",
deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)"
: "WDDM (Windows Display Driver Model)");
#endif
printf(" Device supports Unified Addressing (UVA): %s\n",
deviceProp.unifiedAddressing ? "Yes" : "No");
printf(" Device supports Compute Preemption: %s\n",
deviceProp.computePreemptionSupported ? "Yes" : "No");
printf(" Supports Cooperative Kernel Launch: %s\n",
deviceProp.cooperativeLaunch ? "Yes" : "No");
printf(" Supports MultiDevice Co-op Kernel Launch: %s\n",
deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No");
printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n",
deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
const char *sComputeMode[] = {
"Default (multiple host threads can use ::hipSetDevice() with device "
"simultaneously)",
"Exclusive (only one host thread in one process is able to use "
"::hipSetDevice() with this device)",
"Prohibited (no host thread can use ::hipSetDevice() with this "
"device)",
"Exclusive Process (many threads in one process is able to use "
"::hipSetDevice() with this device)",
"Unknown",
NULL};
printf(" Compute Mode:\n");
printf(" < %s >\n", sComputeMode[deviceProp.computeMode]);
}
// If there are 2 or more GPUs, query to determine whether RDMA is supported
if (deviceCount >= 2) {
hipDeviceProp_t prop[64];
int gpuid[64]; // we want to find the first two GPUs that can support P2P
int gpu_p2p_count = 0;
for (int i = 0; i < deviceCount; i++) {
checkCudaErrors(hipGetDeviceProperties(&prop[i], i));
// Only boards based on Fermi or later can support P2P
if ((prop[i].major >= 2)
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// on Windows (64-bit), the Tesla Compute Cluster driver for windows
// must be enabled to support this
&& prop[i].tccDriver
#endif
) {
// This is an array of P2P capable GPUs
gpuid[gpu_p2p_count++] = i;
}
}
// Show all the combinations of support P2P GPUs
int can_access_peer;
if (gpu_p2p_count >= 2) {
for (int i = 0; i < gpu_p2p_count; i++) {
for (int j = 0; j < gpu_p2p_count; j++) {
if (gpuid[i] == gpuid[j]) {
continue;
}
checkCudaErrors(
hipDeviceCanAccessPeer(&can_access_peer, gpuid[i], gpuid[j]));
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n",
prop[gpuid[i]].name, gpuid[i], prop[gpuid[j]].name, gpuid[j],
can_access_peer ? "Yes" : "No");
}
}
}
}
// csv masterlog info
// *****************************
// exe and CUDA driver name
printf("\n");
std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
// driver version
sProfileString += ", CUDA Driver Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
snprintf(cTemp, sizeof(cTemp), "%d.%d", driverVersion / 1000,
(driverVersion % 100) / 10);
#endif
sProfileString += cTemp;
// Runtime version
sProfileString += ", CUDA Runtime Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
snprintf(cTemp, sizeof(cTemp), "%d.%d", runtimeVersion / 1000,
(runtimeVersion % 100) / 10);
#endif
sProfileString += cTemp;
// Device count
sProfileString += ", NumDevs = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d", deviceCount);
#else
snprintf(cTemp, sizeof(cTemp), "%d", deviceCount);
#endif
sProfileString += cTemp;
sProfileString += "\n";
printf("%s", sProfileString.c_str());
printf("Result = PASS\n");
// finish
}
|
1746fe93205a56a294aeeb2fdd42025734006d55.cu
|
/*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample queries the properties of the CUDA devices present in the system
* via CUDA Runtime API. */
// std::system includes
#include <iostream>
#include <memory>
#include "helper_cuda.h"
#include <string>
#if CUDART_VERSION < 5000
// CUDA-C includes
#include <cuda.h>
// This function wraps the CUDA Driver API into a template function
template <class T>
inline void getCudaAttribute(T *attribute, CUdevice_attribute device_attribute,
int device) {
CUresult error = cuDeviceGetAttribute(attribute, device_attribute, device);
if (CUDA_SUCCESS != error) {
fprintf(
stderr,
"cuSafeCallNoSync() Driver API error = %04d from file <%s>, line %i.\n",
error, __FILE__, __LINE__);
//exit(EXIT_FAILURE);
//system("pause");
}
}
#endif /* CUDART_VERSION < 5000 */
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
void deviceQuery() {
printf(" Starting...\n\n");
printf(
" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");
int deviceCount = 0;
cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
if (error_id != cudaSuccess) {
printf("cudaGetDeviceCount returned %d\n-> %s\n",
static_cast<int>(error_id), cudaGetErrorString(error_id));
printf("Result = FAIL\n");
//exit(EXIT_FAILURE);
//system("pause");
}
// This function call returns 0 if there are no CUDA capable devices.
if (deviceCount == 0) {
printf("There are no available device(s) that support CUDA\n");
} else {
printf("Detected %d CUDA Capable device(s)\n", deviceCount);
}
int dev, driverVersion = 0, runtimeVersion = 0;
for (dev = 0; dev < deviceCount; ++dev) {
cudaSetDevice(dev);
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("\nDevice %d: \"%s\"\n", dev, deviceProp.name);
// Console log
cudaDriverGetVersion(&driverVersion);
cudaRuntimeGetVersion(&runtimeVersion);
printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
driverVersion / 1000, (driverVersion % 100) / 10,
runtimeVersion / 1000, (runtimeVersion % 100) / 10);
printf(" CUDA Capability Major/Minor version number: %d.%d\n",
deviceProp.major, deviceProp.minor);
char msg[256];
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(msg, sizeof(msg),
" Total amount of global memory: %.0f MBytes "
"(%llu bytes)\n",
static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
(unsigned long long)deviceProp.totalGlobalMem);
#else
snprintf(msg, sizeof(msg),
" Total amount of global memory: %.0f MBytes "
"(%llu bytes)\n",
static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f),
(unsigned long long)deviceProp.totalGlobalMem);
#endif
printf("%s", msg);
printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n",
deviceProp.multiProcessorCount,
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
_ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) *
deviceProp.multiProcessorCount);
printf(
" GPU Max Clock rate: %.0f MHz (%0.2f "
"GHz)\n",
deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);
#if CUDART_VERSION >= 5000
// This is supported in CUDA 5.0 (runtime API device properties)
printf(" Memory Clock rate: %.0f Mhz\n",
deviceProp.memoryClockRate * 1e-3f);
printf(" Memory Bus Width: %d-bit\n",
deviceProp.memoryBusWidth);
if (deviceProp.l2CacheSize) {
printf(" L2 Cache Size: %d bytes\n",
deviceProp.l2CacheSize);
}
#else
// This only available in CUDA 4.0-4.2 (but these were only exposed in the
// CUDA Driver API)
int memoryClock;
getCudaAttribute<int>(&memoryClock, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE,
dev);
printf(" Memory Clock rate: %.0f Mhz\n",
memoryClock * 1e-3f);
int memBusWidth;
getCudaAttribute<int>(&memBusWidth,
CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, dev);
printf(" Memory Bus Width: %d-bit\n",
memBusWidth);
int L2CacheSize;
getCudaAttribute<int>(&L2CacheSize, CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE, dev);
if (L2CacheSize) {
printf(" L2 Cache Size: %d bytes\n",
L2CacheSize);
}
#endif
printf(
" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, "
"%d), 3D=(%d, %d, %d)\n",
deviceProp.maxTexture1D, deviceProp.maxTexture2D[0],
deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0],
deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]);
printf(
" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]);
printf(
" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d "
"layers\n",
deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1],
deviceProp.maxTexture2DLayered[2]);
printf(" Total amount of constant memory: %zu bytes\n",
deviceProp.totalConstMem);
printf(" Total amount of shared memory per block: %zu bytes\n",
deviceProp.sharedMemPerBlock);
printf(" Total number of registers available per block: %d\n",
deviceProp.regsPerBlock);
printf(" Warp size: %d\n",
deviceProp.warpSize);
printf(" Maximum number of threads per multiprocessor: %d\n",
deviceProp.maxThreadsPerMultiProcessor);
printf(" Maximum number of threads per block: %d\n",
deviceProp.maxThreadsPerBlock);
printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf(" Maximum memory pitch: %zu bytes\n",
deviceProp.memPitch);
printf(" Texture alignment: %zu bytes\n",
deviceProp.textureAlignment);
printf(
" Concurrent copy and kernel execution: %s with %d copy "
"engine(s)\n",
(deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount);
printf(" Run time limit on kernels: %s\n",
deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
printf(" Integrated GPU sharing Host Memory: %s\n",
deviceProp.integrated ? "Yes" : "No");
printf(" Support host page-locked memory mapping: %s\n",
deviceProp.canMapHostMemory ? "Yes" : "No");
printf(" Alignment requirement for Surfaces: %s\n",
deviceProp.surfaceAlignment ? "Yes" : "No");
printf(" Device has ECC support: %s\n",
deviceProp.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n",
deviceProp.tccDriver ? "TCC (Tesla Compute Cluster Driver)"
: "WDDM (Windows Display Driver Model)");
#endif
printf(" Device supports Unified Addressing (UVA): %s\n",
deviceProp.unifiedAddressing ? "Yes" : "No");
printf(" Device supports Compute Preemption: %s\n",
deviceProp.computePreemptionSupported ? "Yes" : "No");
printf(" Supports Cooperative Kernel Launch: %s\n",
deviceProp.cooperativeLaunch ? "Yes" : "No");
printf(" Supports MultiDevice Co-op Kernel Launch: %s\n",
deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No");
printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n",
deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID);
const char *sComputeMode[] = {
"Default (multiple host threads can use ::cudaSetDevice() with device "
"simultaneously)",
"Exclusive (only one host thread in one process is able to use "
"::cudaSetDevice() with this device)",
"Prohibited (no host thread can use ::cudaSetDevice() with this "
"device)",
"Exclusive Process (many threads in one process is able to use "
"::cudaSetDevice() with this device)",
"Unknown",
NULL};
printf(" Compute Mode:\n");
printf(" < %s >\n", sComputeMode[deviceProp.computeMode]);
}
// If there are 2 or more GPUs, query to determine whether RDMA is supported
if (deviceCount >= 2) {
cudaDeviceProp prop[64];
int gpuid[64]; // we want to find the first two GPUs that can support P2P
int gpu_p2p_count = 0;
for (int i = 0; i < deviceCount; i++) {
checkCudaErrors(cudaGetDeviceProperties(&prop[i], i));
// Only boards based on Fermi or later can support P2P
if ((prop[i].major >= 2)
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
// on Windows (64-bit), the Tesla Compute Cluster driver for windows
// must be enabled to support this
&& prop[i].tccDriver
#endif
) {
// This is an array of P2P capable GPUs
gpuid[gpu_p2p_count++] = i;
}
}
// Show all the combinations of support P2P GPUs
int can_access_peer;
if (gpu_p2p_count >= 2) {
for (int i = 0; i < gpu_p2p_count; i++) {
for (int j = 0; j < gpu_p2p_count; j++) {
if (gpuid[i] == gpuid[j]) {
continue;
}
checkCudaErrors(
cudaDeviceCanAccessPeer(&can_access_peer, gpuid[i], gpuid[j]));
printf("> Peer access from %s (GPU%d) -> %s (GPU%d) : %s\n",
prop[gpuid[i]].name, gpuid[i], prop[gpuid[j]].name, gpuid[j],
can_access_peer ? "Yes" : "No");
}
}
}
}
// csv masterlog info
// *****************************
// exe and CUDA driver name
printf("\n");
std::string sProfileString = "deviceQuery, CUDA Driver = CUDART";
char cTemp[16];
// driver version
sProfileString += ", CUDA Driver Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d.%d", driverVersion/1000, (driverVersion%100)/10);
#else
snprintf(cTemp, sizeof(cTemp), "%d.%d", driverVersion / 1000,
(driverVersion % 100) / 10);
#endif
sProfileString += cTemp;
// Runtime version
sProfileString += ", CUDA Runtime Version = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d.%d", runtimeVersion/1000, (runtimeVersion%100)/10);
#else
snprintf(cTemp, sizeof(cTemp), "%d.%d", runtimeVersion / 1000,
(runtimeVersion % 100) / 10);
#endif
sProfileString += cTemp;
// Device count
sProfileString += ", NumDevs = ";
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
sprintf_s(cTemp, 10, "%d", deviceCount);
#else
snprintf(cTemp, sizeof(cTemp), "%d", deviceCount);
#endif
sProfileString += cTemp;
sProfileString += "\n";
printf("%s", sProfileString.c_str());
printf("Result = PASS\n");
// finish
}
|
888290f4e028c08a8c433ef006711ec7e28ddd54.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
extern "C" {
#include "finite-difference.h"
}
#define DEBUG 1
// Convenience function for checking CUDA runtime API results; it can be wrapped around any
// runtime API call. No-op unless DEBUG or _DEBUG is defined.
inline hipError_t checkCuda(hipError_t result, int lineNum)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess)
{
fprintf(stderr, "CUDA Runtime Error: (%s) %d at line %d\n", hipGetErrorString(result), result, lineNum);
// assert(result == hipSuccess);
exit(-1);
}
#endif
return(result);
}
struct resistor
{
char* c_mi;
int mx;
int my;
int mz;
int bytes;
};
struct resistor* RES_new(int dimX, int dimY, int dimZ)
{
//create new structure;
struct resistor* res = (struct resistor*)malloc(sizeof(struct resistor));
if(res == NULL)
return(NULL);
res->mx = dimX;
res->my = dimY;
res->mz = dimZ;
res->bytes = res->mx*res->my*res->mz * sizeof(char);
// allocate space in GPU;
checkCuda( hipMalloc((void**)&res->c_mi, res->bytes), __LINE__ );
return(res);
}
void RES_Destroy(struct resistor* res)
{
if(res == NULL)
return;
checkCuda( hipFree(res->c_mi), __LINE__ );
free(res);
}
void RES_MakeChip(struct resistor* res, int padX, int padY, int height, float val)
{
int xOff = res->mx/2;
int yOff = res->my/2;
// extrude left pad from copper
char* pad = (char*)malloc(padX*padY*sizeof(char));
memset(pad,3,padX*padY);
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, padX, padY, xOff-padX/2, yOff-padY/2, 0, height);
// extrude right pad from copper
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, padX, padY, xOff+padX/2, yOff+padY/2, 0, height);
	// extrude ceramic bottom non-conductor
free(pad);
int baseX = res->mx-2*padX;
int baseY = padY;
pad = (char*)malloc(baseX*baseY*sizeof(char));
memset(pad,4,baseX*baseY);
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, baseX, baseY, xOff, yOff, 0, height-1);
	// fill the resistive "surface" layer with the same material index (4) as the ceramic base
	// (note: the conductivity value 'val' is not used here)
memset(pad,4,baseX*baseY);
// extrude resistive surface
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, baseX, baseY, xOff, yOff, height-1, height);
}
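// Illustrative sketch (not part of the original source): the typical call sequence for the
// helpers above. The grid dimensions, pad sizes, height and conductivity value below are
// placeholders chosen for the example only, and RES_ExampleUsage is not referenced elsewhere.
static void RES_ExampleUsage(void)
{
	struct resistor* res = RES_new(128, 64, 32); // 128x64x32 material-index volume on the GPU
	if(res == NULL)
		return;
	RES_MakeChip(res, 16, 32, 8, 0.5f); // copper pads, ceramic base and resistive surface
	RES_Destroy(res); // release the device buffer
}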
|
888290f4e028c08a8c433ef006711ec7e28ddd54.cu
|
#include <stdio.h>
#include <assert.h>
extern "C" {
#include "finite-difference.h"
}
#define DEBUG 1
// Convenience function for checking CUDA runtime API results; it can be wrapped around any
// runtime API call. No-op unless DEBUG or _DEBUG is defined.
inline cudaError_t checkCuda(cudaError_t result, int lineNum)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess)
{
fprintf(stderr, "CUDA Runtime Error: (%s) %d at line %d\n", cudaGetErrorString(result), result, lineNum);
// assert(result == cudaSuccess);
exit(-1);
}
#endif
return(result);
}
struct resistor
{
char* c_mi;
int mx;
int my;
int mz;
int bytes;
};
struct resistor* RES_new(int dimX, int dimY, int dimZ)
{
//create new structure;
struct resistor* res = (struct resistor*)malloc(sizeof(struct resistor));
if(res == NULL)
return(NULL);
res->mx = dimX;
res->my = dimY;
res->mz = dimZ;
res->bytes = res->mx*res->my*res->mz * sizeof(char);
// allocate space in GPU;
checkCuda( cudaMalloc((void**)&res->c_mi, res->bytes), __LINE__ );
return(res);
}
void RES_Destroy(struct resistor* res)
{
if(res == NULL)
return;
checkCuda( cudaFree(res->c_mi), __LINE__ );
free(res);
}
void RES_MakeChip(struct resistor* res, int padX, int padY, int height, float val)
{
int xOff = res->mx/2;
int yOff = res->my/2;
// extrude left pad from copper
char* pad = (char*)malloc(padX*padY*sizeof(char));
memset(pad,3,padX*padY);
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, padX, padY, xOff-padX/2, yOff-padY/2, 0, height);
// extrude right pad from copper
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, padX, padY, xOff+padX/2, yOff+padY/2, 0, height);
	// extrude ceramic bottom non-conductor
free(pad);
int baseX = res->mx-2*padX;
int baseY = padY;
pad = (char*)malloc(baseX*baseY*sizeof(char));
memset(pad,4,baseX*baseY);
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, baseX, baseY, xOff, yOff, 0, height-1);
	// fill the resistive "surface" layer with the same material index (4) as the ceramic base
	// (note: the conductivity value 'val' is not used here)
memset(pad,4,baseX*baseY);
// extrude resistive surface
MatIndex_ExtrudeZ(res->c_mi, res->mx, res->my, res->mz, pad, baseX, baseY, xOff, yOff, height-1, height);
}
|
59c26fb62fc0621402f981ac51850f6e3649f5c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "gplex_mul.h"
#ifdef EIGEN_TEST
#include <Eigen/Dense>
using Matrix66 = Eigen::Matrix<float, 6, 6, Eigen::AutoAlign>;
__global__ void set_mem(Matrix66* a, float val, size_t N) {
Matrix66 v = Matrix66::Constant(val);
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
a[n] = v;
}
}
bool check(const int N, const Matrix66* c, bool managed)
{
const float eps = 1e-30f;
float c0, c36;
if (managed) {
c0 = c[0](0,0);
c36 = c[1](0,0);
int device = -1;
hipGetDevice(&device);
hipMemPrefetchAsync(c, sizeof(Matrix66)*N, device, NULL);
} else {
Matrix66 h[N];
hipMemcpy(&h, c, N*sizeof(Matrix66), hipMemcpyDefault);
c0 = h[0](0,0);
c36 = h[1](0,0);
}
bool pass = (std::abs(c0 - c36) < eps) && (std::abs(c0 - 6.0f) < eps);
if (!pass) {
std::cout << "Fail check c[0]=" << c0 << " c[36]=" << c36 << std::endl;
}
return pass;
}
__global__ void eigen_naive_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
c[n] = a[n] * b[n];
}
}
__global__ void eigen_reg_c_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
Matrix66 c_reg;
c_reg = a[n] * b[n];
c[n] = c_reg;
}
}
__global__ void eigen_reg_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
Matrix66 a_reg(a[n]), b_reg(b[n]);
Matrix66 c_reg(a_reg * b_reg);
c[n] = c_reg;
}
}
void eigen_run_naive_mul(int iter, bool managed)
{
constexpr int N = Nwidth;
constexpr int sz = sizeof(Matrix66)*N;
Matrix66* a;
Matrix66* b;
Matrix66* c;
if (managed) {
hipMallocManaged((void**)&a, sz);
hipMallocManaged((void**)&b, sz);
hipMallocManaged((void**)&c, sz);
int device = -1;
hipGetDevice(&device);
hipMemPrefetchAsync(a, sz, device, NULL);
hipMemPrefetchAsync(b, sz, device, NULL);
hipMemPrefetchAsync(c, sz, device, NULL);
} else {
hipMalloc((void**)&a, sz);
hipMalloc((void**)&b, sz);
hipMalloc((void**)&c, sz);
}
cudaCheckError();
dim3 grid (((N-1)/block_size + 1), 1, 1);
dim3 block (block_size, 1, 1);
hipLaunchKernelGGL(( set_mem) , dim3(grid), dim3(block) , 0, 0, a, 1.f , N);
hipLaunchKernelGGL(( set_mem) , dim3(grid), dim3(block) , 0, 0, b, 1.f, N);
hipLaunchKernelGGL(( set_mem) , dim3(grid), dim3(block) , 0, 0, c, 0.f, N);
if (managed) {
hipMemAdvise(a, sz, hipMemAdviseSetReadMostly, 0);
hipMemAdvise(b, sz, hipMemAdviseSetReadMostly, 0);
}
cudaCheckErrorSync();
for (int i = 0; i < iter; ++i)
hipLaunchKernelGGL(( eigen_naive_mult_kn) , dim3(grid), dim3(block) , 0, 0, a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
for (int i = 0; i < iter; ++i)
hipLaunchKernelGGL(( eigen_reg_c_mult_kn) , dim3(grid), dim3(block) , 0, 0, a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
for (int i = 0; i < iter; ++i)
hipLaunchKernelGGL(( eigen_reg_mult_kn) , dim3(grid), dim3(block) , 0, 0, a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
hipFree(a);
hipFree(b);
hipFree(c);
cudaCheckErrorSync();
}
#endif
|
59c26fb62fc0621402f981ac51850f6e3649f5c0.cu
|
#include "gplex_mul.h"
#ifdef EIGEN_TEST
#include <Eigen/Dense>
using Matrix66 = Eigen::Matrix<float, 6, 6, Eigen::AutoAlign>;
__global__ void set_mem(Matrix66* a, float val, size_t N) {
Matrix66 v = Matrix66::Constant(val);
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
a[n] = v;
}
}
bool check(const int N, const Matrix66* c, bool managed)
{
const float eps = 1e-30f;
float c0, c36;
if (managed) {
c0 = c[0](0,0);
c36 = c[1](0,0);
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(c, sizeof(Matrix66)*N, device, NULL);
} else {
Matrix66 h[N];
cudaMemcpy(&h, c, N*sizeof(Matrix66), cudaMemcpyDefault);
c0 = h[0](0,0);
c36 = h[1](0,0);
}
bool pass = (std::abs(c0 - c36) < eps) && (std::abs(c0 - 6.0f) < eps);
if (!pass) {
std::cout << "Fail check c[0]=" << c0 << " c[36]=" << c36 << std::endl;
}
return pass;
}
__global__ void eigen_naive_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
c[n] = a[n] * b[n];
}
}
__global__ void eigen_reg_c_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
Matrix66 c_reg;
c_reg = a[n] * b[n];
c[n] = c_reg;
}
}
__global__ void eigen_reg_mult_kn(const Matrix66* RESTRICT a, const Matrix66* RESTRICT b, Matrix66* c, const int N)
{
for (int n = threadIdx.x + blockIdx.x * blockDim.x;
n < N;
n += blockDim.x * gridDim.x) {
Matrix66 a_reg(a[n]), b_reg(b[n]);
Matrix66 c_reg(a_reg * b_reg);
c[n] = c_reg;
}
}
void eigen_run_naive_mul(int iter, bool managed)
{
constexpr int N = Nwidth;
constexpr int sz = sizeof(Matrix66)*N;
Matrix66* a;
Matrix66* b;
Matrix66* c;
if (managed) {
cudaMallocManaged((void**)&a, sz);
cudaMallocManaged((void**)&b, sz);
cudaMallocManaged((void**)&c, sz);
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(a, sz, device, NULL);
cudaMemPrefetchAsync(b, sz, device, NULL);
cudaMemPrefetchAsync(c, sz, device, NULL);
} else {
cudaMalloc((void**)&a, sz);
cudaMalloc((void**)&b, sz);
cudaMalloc((void**)&c, sz);
}
cudaCheckError();
dim3 grid (((N-1)/block_size + 1), 1, 1);
dim3 block (block_size, 1, 1);
set_mem <<< grid, block >>> (a, 1.f , N);
set_mem <<< grid, block >>> (b, 1.f, N);
set_mem <<< grid, block >>> (c, 0.f, N);
if (managed) {
cudaMemAdvise(a, sz, cudaMemAdviseSetReadMostly, 0);
cudaMemAdvise(b, sz, cudaMemAdviseSetReadMostly, 0);
}
cudaCheckErrorSync();
for (int i = 0; i < iter; ++i)
eigen_naive_mult_kn <<< grid, block >>> (a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
for (int i = 0; i < iter; ++i)
eigen_reg_c_mult_kn <<< grid, block >>> (a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
for (int i = 0; i < iter; ++i)
eigen_reg_mult_kn <<< grid, block >>> (a, b, c, N);
cudaCheckErrorSync();
assert(check(N, c, managed));
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaCheckErrorSync();
}
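// Illustrative sketch (not part of the original source): the host-side identity that check()
// relies on, namely that multiplying two 6x6 matrices filled with 1.0f yields a matrix whose
// entries are all 6.0f. The helper name eigen_host_sanity_check is introduced here for
// illustration only; assert and std::abs are already available in this translation unit, since
// check() and eigen_run_naive_mul() above use them.
static inline void eigen_host_sanity_check()
{
  Matrix66 const a = Matrix66::Constant(1.f);
  Matrix66 const b = Matrix66::Constant(1.f);
  Matrix66 const c = a * b;
  assert(std::abs(c(0, 0) - 6.0f) < 1e-6f);
}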
#endif
|
e4fb6f2b29fd60a74b4760339b6909303242e8a7.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (C) 2011 Dmitri Nikulin
// Copyright (C) 2011 Monash University
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
// Undo namespace damage done by CUDA's nvcc.
#undef isfinite
#undef isnan
#include <hip/hip_runtime.h>
#include "common.h"
// Number of threads per 1D group.
#define NTHREADS 512
// Prototype for external OpenCL FAST
void clfast(CVD::Image<CVD::byte> const & image);
// Declare 1-byte read-only texture object.
texture<uchar1, 2, hipReadModeElementType> static testImage;
__device__ int mask_test(uint x16) {
// Duplicate bit pattern to simulate barrel shift.
uint const x = (x16 | (x16 << 16));
// Accumulator.
uint x1 = x;
// AND against down-shifts.
#pragma unroll
for (uint i = 1; i < FAST_RING; i++)
x1 &= (x >> i);
// Return of 1 here proves that FAST_RING
// consecutive bits were 1.
return (x1 != 0);
}
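// Illustrative sketch (not part of the original source): a host-side analogue of mask_test()
// that can be checked without the GPU. "ring" stands in for FAST_RING; the ring length 9 used
// in the example below is an assumption for illustration only.
//
//   host_mask_test(0xF01Fu, 9) != 0, because bits 12..15 and 0..4 of the 16-bit ring form 9
//   consecutive set bits once wrap-around is allowed, which the 32-bit duplication simulates.
static inline int host_mask_test(unsigned int x16, unsigned int ring) {
    unsigned int const x = (x16 | (x16 << 16));
    unsigned int x1 = x;
    for (unsigned int i = 1; i < ring; i++)
        x1 &= (x >> i);
    return (x1 != 0);
}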
__global__ void fast1_kernel(
int2 * corners,
int * icorner
) {
// Calculate (x,y) of center pixel.
int const x = ((blockIdx.x * blockDim.x) + threadIdx.x + X_OFF);
int const y = ((blockIdx.y * blockDim.y) + threadIdx.y + Y_OFF);
// Read center pixel, upcast to int.
int const p00 = tex2D(testImage, x, y).x;
// Include generated code here.
// Checks ring of pixels, and populates the boolean "isCorner".
#include "fast1_gen.cu"
if (isCorner) {
// Atomically append to corner buffer.
int const icorn = atomicAdd(icorner, 1);
if ((icorn >= 0) && (icorn < FAST_COUNT))
corners[icorn] = make_int2(x, y);
}
}
__global__ void fast2_kernel(
int2 const * i_corners,
int const * i_ncorners,
int2 * o_corners,
int * o_ncorners
) {
// Find input offset.
int const idx = ((blockIdx.x * blockDim.x) + threadIdx.x);
if ((idx < 0) || (idx >= i_ncorners[0]))
return;
// Read (x,y) of center pixel.
int2 const xy = i_corners[idx];
int const x = xy.x;
int const y = xy.y;
// Read center pixel, upcast to int.
int const p00 = tex2D(testImage, x, y).x;
// Include generated code here.
// Checks ring of pixels, and populates "pattern".
#include "fast2_gen.cu"
if (mask_test(pattern)) {
// Atomically append to corner buffer.
int const icorn = atomicAdd(o_ncorners, 1);
if (icorn < FAST_COUNT)
o_corners[icorn] = xy;
}
}
static void cufast(CVD::Image<CVD::byte> const & image) {
// Re-interpret image pointer.
uchar1 const * const data = reinterpret_cast<uchar1 const *>(image.data());
int const nx = image.size().x;
int const ny = image.size().y;
// Configure the texture reference.
testImage.addressMode[0] = hipAddressModeClamp;
testImage.addressMode[1] = hipAddressModeClamp;
testImage.filterMode = hipFilterModePoint;
testImage.normalized = false;
// Create channel descriptor.
hipChannelFormatDesc const format = hipCreateChannelDesc(8, 0, 0, 0, hipChannelFormatKindUnsigned);
// Allocate texture array.
hipArray * buffer = NULL;
hipMallocArray(&buffer, &format, nx, ny);
// Populate texture array.
hipMemcpyToArray(buffer, 0, 0, data, nx * ny * sizeof(uchar1), hipMemcpyHostToDevice);
hipBindTextureToArray(testImage, buffer, format);
// Allocate corner array 1.
int2 * corners1 = NULL;
hipMalloc(&corners1, sizeof(int2) * FAST_COUNT);
// Allocate corner array 2.
int2 * corners2 = NULL;
hipMalloc(&corners2, sizeof(int2) * FAST_COUNT);
// Allocate corner cursor 1.
int * icorner1 = NULL;
hipMalloc(&icorner1, sizeof(int));
// Allocate corner cursor 2.
int * icorner2 = NULL;
hipMalloc(&icorner2, sizeof(int));
// Reset corner cursors.
int const zero = 0;
hipMemcpy(icorner1, &zero, sizeof(zero), hipMemcpyHostToDevice);
hipMemcpy(icorner2, &zero, sizeof(zero), hipMemcpyHostToDevice);
// Create work grid 1.
dim3 const dimBlock1(16, 16, 1);
dim3 const dimGrid1((nx - (X_OFF * 2)) / dimBlock1.x, (ny - (Y_OFF * 2)) / dimBlock1.y, 1);
// Warmup.
hipLaunchKernelGGL(( fast1_kernel), dim3(dimGrid1), dim3(dimBlock1), 0, 0, corners1, icorner1);
hipLaunchKernelGGL(( fast2_kernel), dim3(FAST_COUNT / NTHREADS), dim3(NTHREADS), 0, 0, corners1, icorner1, corners2, icorner2);
// Reset corner cursors.
hipMemcpy(icorner1, &zero, sizeof(zero), hipMemcpyHostToDevice);
hipMemcpy(icorner2, &zero, sizeof(zero), hipMemcpyHostToDevice);
// Run kernels for time.
long const time1 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
hipMemcpy(icorner1, &zero, sizeof(zero), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fast1_kernel), dim3(dimGrid1), dim3(dimBlock1), 0, 0, corners1, icorner1);
}
long const time2 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
hipMemcpy(icorner2, &zero, sizeof(zero), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( fast2_kernel), dim3(FAST_COUNT / NTHREADS), dim3(NTHREADS), 0, 0, corners1, icorner1, corners2, icorner2);
}
long const time3 = time(NULL);
// Read number of corners.
int ncorners1 = 0;
int ncorners2 = 0;
hipMemcpy(&ncorners1, icorner1, sizeof(ncorners1), hipMemcpyDeviceToHost);
hipMemcpy(&ncorners2, icorner2, sizeof(ncorners2), hipMemcpyDeviceToHost);
// Cleanup.
hipFree(icorner2);
hipFree(icorner1);
hipFree(corners2);
hipFree(corners1);
hipFreeArray(buffer);
// Calculate microseconds per kernel.
int const us1 = (((time2 - time1) * 1000000) / REPEAT);
int const us2 = (((time3 - time2) * 1000000) / REPEAT);
// Report timing and number of corners.
std::cerr << "CUDA" << std::endl;
std::cerr << std::setw(12) << ncorners1 << " corners 1" << std::endl;
std::cerr << std::setw(12) << ncorners2 << " corners 2" << std::endl;
std::cerr << std::setw(12) << us1 << " microseconds 1" << std::endl;
std::cerr << std::setw(12) << us2 << " microseconds 2" << std::endl;
std::cerr << std::endl;
}
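// Note that time(NULL) has one-second resolution, so us1 and us2 are only meaningful
// when REPEAT is large enough for each timed loop to run for several seconds.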
static void cxxfast(CVD::Image<CVD::byte> const & image) {
// Prepare corner buffer.
std::vector<CVD::ImageRef> cvd_corners;
cvd_corners.reserve(FAST_COUNT);
long const time1 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
cvd_corners.clear();
CVD::fast_corner_detect_9(image, cvd_corners, FAST_THRESH);
}
long const time2 = time(NULL);
// Read number of corners.
int const ncorners3 = cvd_corners.size();
// Calculate microseconds per kernel.
int const us3 = (((time2 - time1) * 1000000) / REPEAT);
// Report timing and number of corners.
std::cerr << "C++" << std::endl;
std::cerr << std::setw(12) << ncorners3 << " corners 2" << std::endl;
std::cerr << std::setw(12) << us3 << " microseconds 1+2" << std::endl;
std::cerr << std::endl;
}
int main(int argc, char **argv) {
CVD::Image<CVD::byte> const fullImage = CVD::img_load("../images/shuttle.jpg");
CVD::ImageRef const fullSize = fullImage.size();
// Image size to keep for computation.
int const nx = 2048;
int const ny = 2048;
CVD::ImageRef const keepSize(nx, ny);
// Crop to sub-image.
CVD::Image<CVD::byte> keepImage(keepSize);
keepImage.copy_from(fullImage.sub_image(CVD::ImageRef(0, 0), keepSize));
// Benchmark all implementations.
cufast(keepImage);
clfast(keepImage);
cxxfast(keepImage);
return 0;
}
|
e4fb6f2b29fd60a74b4760339b6909303242e8a7.cu
|
// Copyright (C) 2011 Dmitri Nikulin
// Copyright (C) 2011 Monash University
//
// Permission is hereby granted, free of charge, to any person
// obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without
// restriction, including without limitation the rights to use,
// copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following
// conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
// HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
// WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
// Undo namespace damage done by CUDA's nvcc.
#undef isfinite
#undef isnan
#include <cuda.h>
#include "common.h"
// Number of threads per 1D group.
#define NTHREADS 512
// Prototype for external OpenCL FAST
void clfast(CVD::Image<CVD::byte> const & image);
// Declare 1-byte read-only texture reference (legacy texture<> API).
texture<uchar1, 2, cudaReadModeElementType> static testImage;
__device__ int mask_test(uint x16) {
// Duplicate bit pattern to simulate barrel shift.
uint const x = (x16 | (x16 << 16));
// Accumulator.
uint x1 = x;
// AND against down-shifts.
#pragma unroll
for (uint i = 1; i < FAST_RING; i++)
x1 &= (x >> i);
// Return of 1 here proves that FAST_RING
// consecutive bits were 1.
return (x1 != 0);
}
__global__ void fast1_kernel(
int2 * corners,
int * icorner
) {
// Calculate (x,y) of center pixel.
int const x = ((blockIdx.x * blockDim.x) + threadIdx.x + X_OFF);
int const y = ((blockIdx.y * blockDim.y) + threadIdx.y + Y_OFF);
// Read center pixel, upcast to int.
int const p00 = tex2D(testImage, x, y).x;
// Include generated code here.
// Checks ring of pixels, and populates the boolean "isCorner".
#include "fast1_gen.cu"
if (isCorner) {
// Atomically append to corner buffer.
int const icorn = atomicAdd(icorner, 1);
if ((icorn >= 0) && (icorn < FAST_COUNT))
corners[icorn] = make_int2(x, y);
}
}
__global__ void fast2_kernel(
int2 const * i_corners,
int const * i_ncorners,
int2 * o_corners,
int * o_ncorners
) {
// Find input offset.
int const idx = ((blockIdx.x * blockDim.x) + threadIdx.x);
if ((idx < 0) || (idx >= i_ncorners[0]))
return;
// Read (x,y) of center pixel.
int2 const xy = i_corners[idx];
int const x = xy.x;
int const y = xy.y;
// Read center pixel, upcast to int.
int const p00 = tex2D(testImage, x, y).x;
// Include generated code here.
// Checks ring of pixels, and populates "pattern".
#include "fast2_gen.cu"
if (mask_test(pattern)) {
// Atomically append to corner buffer.
int const icorn = atomicAdd(o_ncorners, 1);
if (icorn < FAST_COUNT)
o_corners[icorn] = xy;
}
}
static void cufast(CVD::Image<CVD::byte> const & image) {
// Re-interpret image pointer.
uchar1 const * const data = reinterpret_cast<uchar1 const *>(image.data());
int const nx = image.size().x;
int const ny = image.size().y;
// Configure the texture reference.
testImage.addressMode[0] = cudaAddressModeClamp;
testImage.addressMode[1] = cudaAddressModeClamp;
testImage.filterMode = cudaFilterModePoint;
testImage.normalized = false;
// Create channel descriptor.
cudaChannelFormatDesc const format = cudaCreateChannelDesc(8, 0, 0, 0, cudaChannelFormatKindUnsigned);
// Allocate texture array.
cudaArray * buffer = NULL;
cudaMallocArray(&buffer, &format, nx, ny);
// Populate texture array.
cudaMemcpyToArray(buffer, 0, 0, data, nx * ny * sizeof(uchar1), cudaMemcpyHostToDevice);
cudaBindTextureToArray(testImage, buffer, format);
// Allocate corner array 1.
int2 * corners1 = NULL;
cudaMalloc(&corners1, sizeof(int2) * FAST_COUNT);
// Allocate corner array 2.
int2 * corners2 = NULL;
cudaMalloc(&corners2, sizeof(int2) * FAST_COUNT);
// Allocate corner cursor 1.
int * icorner1 = NULL;
cudaMalloc(&icorner1, sizeof(int));
// Allocate corner cursor 2.
int * icorner2 = NULL;
cudaMalloc(&icorner2, sizeof(int));
// Reset corner cursors.
int const zero = 0;
cudaMemcpy(icorner1, &zero, sizeof(zero), cudaMemcpyHostToDevice);
cudaMemcpy(icorner2, &zero, sizeof(zero), cudaMemcpyHostToDevice);
// Create work grid 1.
dim3 const dimBlock1(16, 16, 1);
dim3 const dimGrid1((nx - (X_OFF * 2)) / dimBlock1.x, (ny - (Y_OFF * 2)) / dimBlock1.y, 1);
// Warmup.
fast1_kernel<<<dimGrid1, dimBlock1, 0>>>(corners1, icorner1);
fast2_kernel<<<FAST_COUNT / NTHREADS, NTHREADS>>>(corners1, icorner1, corners2, icorner2);
// Reset corner cursors.
cudaMemcpy(icorner1, &zero, sizeof(zero), cudaMemcpyHostToDevice);
cudaMemcpy(icorner2, &zero, sizeof(zero), cudaMemcpyHostToDevice);
// Run kernels for time.
long const time1 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
cudaMemcpy(icorner1, &zero, sizeof(zero), cudaMemcpyHostToDevice);
fast1_kernel<<<dimGrid1, dimBlock1, 0>>>(corners1, icorner1);
}
long const time2 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
cudaMemcpy(icorner2, &zero, sizeof(zero), cudaMemcpyHostToDevice);
fast2_kernel<<<FAST_COUNT / NTHREADS, NTHREADS>>>(corners1, icorner1, corners2, icorner2);
}
long const time3 = time(NULL);
// Read number of corners.
int ncorners1 = 0;
int ncorners2 = 0;
cudaMemcpy(&ncorners1, icorner1, sizeof(ncorners1), cudaMemcpyDeviceToHost);
cudaMemcpy(&ncorners2, icorner2, sizeof(ncorners2), cudaMemcpyDeviceToHost);
// Cleanup.
cudaFree(icorner2);
cudaFree(icorner1);
cudaFree(corners2);
cudaFree(corners1);
cudaFreeArray(buffer);
// Calculate microseconds per kernel.
int const us1 = (((time2 - time1) * 1000000) / REPEAT);
int const us2 = (((time3 - time2) * 1000000) / REPEAT);
// Report timing and number of corners.
std::cerr << "CUDA" << std::endl;
std::cerr << std::setw(12) << ncorners1 << " corners 1" << std::endl;
std::cerr << std::setw(12) << ncorners2 << " corners 2" << std::endl;
std::cerr << std::setw(12) << us1 << " microseconds 1" << std::endl;
std::cerr << std::setw(12) << us2 << " microseconds 2" << std::endl;
std::cerr << std::endl;
}
static void cxxfast(CVD::Image<CVD::byte> const & image) {
// Prepare corner buffer.
std::vector<CVD::ImageRef> cvd_corners;
cvd_corners.reserve(FAST_COUNT);
long const time1 = time(NULL);
for (int i = 0; i < REPEAT; i++) {
cvd_corners.clear();
CVD::fast_corner_detect_9(image, cvd_corners, FAST_THRESH);
}
long const time2 = time(NULL);
// Read number of corners.
int const ncorners3 = cvd_corners.size();
// Calculate microseconds per kernel.
int const us3 = (((time2 - time1) * 1000000) / REPEAT);
// Report timing and number of corners.
std::cerr << "C++" << std::endl;
std::cerr << std::setw(12) << ncorners3 << " corners 2" << std::endl;
std::cerr << std::setw(12) << us3 << " microseconds 1+2" << std::endl;
std::cerr << std::endl;
}
int main(int argc, char **argv) {
CVD::Image<CVD::byte> const fullImage = CVD::img_load("../images/shuttle.jpg");
CVD::ImageRef const fullSize = fullImage.size();
// Image size to keep for computation.
int const nx = 2048;
int const ny = 2048;
CVD::ImageRef const keepSize(nx, ny);
// Crop to sub-image.
CVD::Image<CVD::byte> keepImage(keepSize);
keepImage.copy_from(fullImage.sub_image(CVD::ImageRef(0, 0), keepSize));
// Benchmark all implementations.
cufast(keepImage);
clfast(keepImage);
cxxfast(keepImage);
return 0;
}
|
3ac48c7e629a9960bb21d16cdc9dc198efcb665b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THH/THHBlas.h>
#include <THH/THHGeneral.h>
#include <TH/THHalf.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPBlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big as the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
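// Example: for a single-column case with m = 5 and n = 1, lda is forced to
// max(m, 1) = 5; only one column of A is ever addressed, so the value exists purely
// to satisfy cuBLAS's "lda >= max(1, m)" argument check.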
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
hipblasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return HIPBLAS_OP_T;
else if (trans == 'n') return HIPBLAS_OP_N;
else if (trans == 'c') return HIPBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return HIPBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big as the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
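// Worked example (illustrative values): transa = 'n', transb = 'n', m = 4, n = 1, k = 1
// yields ldc = max(m, 1) = 4, lda = max(m, 1) = 4 (k <= 1, A not transposed) and
// ldb = max(k, 1) = 1 (n <= 1, B not transposed). Whenever an operand degenerates to a
// single column, its leading dimension is forced to the smallest value the BLAS
// argument checks will accept.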
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if TORCH_HIP_VERSION < 8000
# define HIP_R_16F HIPBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if defined(__HIP_PLATFORM_HCC__) || defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // TORCH_HIP_VERSION < 11000
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, HIP_R_16F, (int)lda, strideA,
b, HIP_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, HIP_R_16F, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // TORCH_HIP_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
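// alpha and beta are widened to float above because both the rocBLAS path
// (rocblas_datatype_f32_r compute type) and the GemmStridedBatchedEx path
// (HIP_R_32F compute type) accumulate in FP32 and expect FP32 scaling factors.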
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
#if defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
hipDeviceProp_t* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major < 8) {
TORCH_CHECK(false, "BFloat16 gemm in CUDA requires Ampere or later GPU");
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(hipblasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16BF, (int)lda, strideA,
b, CUDA_R_16BF, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16BF, (int)ldc, strideC,
(int)batchCount, HIP_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
#endif // defined(TORCH_HIP_VERSION) && TORCH_HIP_VERSION >= 11000
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
hipblasOperation_t opa = convertTransToCublasOperation(transa);
hipblasOperation_t opb = convertTransToCublasOperation(transb);
hipblasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(hipblasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
|
3ac48c7e629a9960bb21d16cdc9dc198efcb665b.cu
|
#include <THC/THCBlas.h>
#include <THC/THCGeneral.h>
#include <TH/THHalf.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDABlas.h>
#include <algorithm>
#include <mutex>
#ifdef __HIP_PLATFORM_HCC__
#include <hip/hip_version.h>
#endif
/* Level 2 */
void adjustLdLevel2(int64_t m, int64_t n, int64_t *lda)
{
// Note: leading dimensions generally are checked that they are > 0 and at least as big as the result
// requires (even if the value won't be used).
// TODO: why does Level3 check trans but this doesn't?
if (n <= 1)
*lda = std::max<int64_t>(m, 1);
}
void THCudaBlas_Sger(THCState *state, int64_t m, int64_t n, float alpha, float *x, int64_t incx, float *y, int64_t incy, float *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Sger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
void THCudaBlas_Dger(THCState *state, int64_t m, int64_t n, double alpha, double *x, int64_t incx, double *y, int64_t incy, double *a, int64_t lda)
{
adjustLdLevel2(m, n, &lda);
if( (m <= INT_MAX) && (n <= INT_MAX) && (lda <= INT_MAX) && (incx <= INT_MAX) && (incy <= INT_MAX) )
{
int i_m = (int)m;
int i_n = (int)n;
int i_lda = (int)lda;
int i_incx = (int)incx;
int i_incy = (int)incy;
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDger(handle, i_m, i_n, &alpha, x, i_incx, y, i_incy, a, i_lda));
return;
}
THError("Cublas_Dger only supports m, n, lda, incx, incy"
"with the bound [val] <= %d", INT_MAX);
}
cublasOperation_t convertTransToCublasOperation(char trans) {
if (trans == 't') return CUBLAS_OP_T;
else if (trans == 'n') return CUBLAS_OP_N;
else if (trans == 'c') return CUBLAS_OP_C;
else {
THError("trans must be one of: t, n, c");
return CUBLAS_OP_T;
}
}
void adjustLdLevel3(char transa, char transb, int64_t m, int64_t n, int64_t k, int64_t *lda, int64_t *ldb, int64_t *ldc)
{
int transa_ = ((transa == 't') || (transa == 'T'));
int transb_ = ((transb == 't') || (transb == 'T'));
// Note: leading dimensions generally are checked that they are > 0 and at least as big as the result
// requires (even if the value won't be used).
if(n <= 1)
*ldc = std::max<int64_t>(m, 1);
if(transa_)
{
if(m <= 1)
*lda = std::max<int64_t>(k, 1);
}
else
{
if(k <= 1)
*lda = std::max<int64_t>(m, 1);
}
if(transb_)
{
if(k <= 1)
*ldb = std::max<int64_t>(n, 1);
}
else
{
if(n <= 1)
*ldb = std::max<int64_t>(k, 1);
}
}
/* Level 3 */
void THCudaBlas_Sgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, float alpha, float *a, int64_t lda, float *b, int64_t ldb, float beta, float *c, int64_t ldc)
{
at::cuda::blas::gemm<float>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
// In CUDA 8.0, definition of data types for sgemmex changed
#if CUDA_VERSION < 8000
# define CUDA_R_16F CUBLAS_DATA_HALF
#endif
void THCudaBlas_Hgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::Half alpha, at::Half *a, int64_t lda, at::Half *b, int64_t ldb, at::Half beta, at::Half *c, int64_t ldc)
{
at::cuda::blas::gemm<at::Half>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#if defined(__HIP_PLATFORM_HCC__) || defined(CUDA_VERSION) && CUDA_VERSION >= 11000
void THCudaBlas_Bgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, at::BFloat16 alpha, at::BFloat16 *a, int64_t lda, at::BFloat16 *b, int64_t ldb, at::BFloat16 beta, at::BFloat16 *c, int64_t ldc)
{
at::cuda::blas::gemm<at::BFloat16>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
#endif
void THCudaBlas_Dgemm(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k, double alpha, double *a, int64_t lda, double *b, int64_t ldb, double beta, double *c, int64_t ldc)
{
at::cuda::blas::gemm<double>(transa, transb, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
}
void THCudaBlas_HgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::Half alpha, const at::Half *a, int64_t lda, int64_t strideA, const at::Half *b, int64_t ldb, int64_t strideB,
at::Half beta, at::Half *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
#ifdef __HIP_PLATFORM_HCC__
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_f16_r, (int)lda, strideA,
b, rocblas_datatype_f16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_f16_r, (int)ldc, strideC,
c, rocblas_datatype_f16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0));
#else
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_TENSOR_OP_MATH));
#endif // CUDA_VERSION < 11000
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16F, (int)lda, strideA,
b, CUDA_R_16F, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16F, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
#if defined(CUDA_VERSION) && CUDA_VERSION < 11000
// On CUDA versions prior to 11, users are required to set the math mode to CUBLAS_TENSOR_OP_MATH
// manually to be able to use tensor cores for FP16. On CUDA 11, this is no longer required.
THCublasCheck(cublasSetMathMode(handle, CUBLAS_DEFAULT_MATH));
#endif // CUDA_VERSION < 11000
#endif // __HIP_PLATFORM_HCC__
}
#ifdef __HIP_PLATFORM_HCC__
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(rocblas_gemm_strided_batched_ex(handle, opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, rocblas_datatype_bf16_r, (int)lda, strideA,
b, rocblas_datatype_bf16_r, (int)ldb, strideB,
(void*)&fBeta, c, rocblas_datatype_bf16_r, (int)ldc, strideC,
c, rocblas_datatype_bf16_r, (int)ldc, strideC,
(int) batchCount, rocblas_datatype_f32_r, rocblas_gemm_algo_standard,
0, 0, NULL, NULL));
}
#endif // __HIP_PLATFORM_HCC__
#if defined(CUDA_VERSION) && CUDA_VERSION >= 11000
void THCudaBlas_BgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
at::BFloat16 alpha, const at::BFloat16 *a, int64_t lda, int64_t strideA, const at::BFloat16 *b, int64_t ldb, int64_t strideB,
at::BFloat16 beta, at::BFloat16 *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties();
if (prop->major < 8) {
TORCH_CHECK(false, "BFloat16 gemm in CUDA requires Ampere or later GPU");
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
float fAlpha = alpha;
float fBeta = beta;
THCublasCheck(cublasGemmStridedBatchedEx(handle,
opa, opb, (int)m, (int)n, (int)k,
(void*)&fAlpha, a, CUDA_R_16BF, (int)lda, strideA,
b, CUDA_R_16BF, (int)ldb, strideB,
(void*)&fBeta, c, CUDA_R_16BF, (int)ldc, strideC,
(int)batchCount, CUDA_R_32F, CUBLAS_GEMM_DEFAULT_TENSOR_OP));
}
#endif // defined(CUDA_VERSION) && CUDA_VERSION >= 11000
void THCudaBlas_SgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a[], int64_t lda, const float *b[], int64_t ldb,
float beta, float *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_SgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_SgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
float alpha, const float *a, int64_t lda, int64_t strideA, const float *b, int64_t ldb, int64_t strideB,
float beta, float *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_SgemmStridedBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasSgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
void THCudaBlas_DgemmBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a[], int64_t lda, const double *b[], int64_t ldb,
double beta, double *c[], int64_t ldc, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
#ifdef __HIP_PLATFORM_HCC__
const int64_t stridea = (transa == 'N' || transa == 'n') ? lda*k : lda*n;
const int64_t strideb = (transb == 'N' || transb == 'n') ? ldb*n : ldb*k;
const int64_t stridec = ldc*n;
THCudaBlas_DgemmStridedBatched(state, transa, transb, m, n, k, alpha, *a, lda, stridea, *b, ldb, strideb, beta, *c, ldc, stridec, batchCount);
#else
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, b, (int)ldb, &beta, c, (int)ldc,
(int)batchCount));
#endif
}
void THCudaBlas_DgemmStridedBatched(THCState *state, char transa, char transb, int64_t m, int64_t n, int64_t k,
double alpha, const double *a, int64_t lda, int64_t strideA, const double *b, int64_t ldb, int64_t strideB,
double beta, double *c, int64_t ldc, int64_t strideC, int64_t batchCount)
{
// See Note [Writing Nondeterministic Operations]
at::globalContext().alertCuBLASConfigNotDeterministic();
if( (m >= INT_MAX) || (n >= INT_MAX) || (k >= INT_MAX) || (lda >= INT_MAX) || (ldb >= INT_MAX) || (ldc >= INT_MAX) || (batchCount >= INT_MAX) )
{
THError("Cublas_DgemmBatched only supports m, n, k, lda, ldb, ldc, batchCount"
"with the bound [val] <= %d", INT_MAX);
}
adjustLdLevel3(transa, transb, m, n, k, &lda, &ldb, &ldc);
cublasOperation_t opa = convertTransToCublasOperation(transa);
cublasOperation_t opb = convertTransToCublasOperation(transb);
cublasHandle_t handle = at::cuda::getCurrentCUDABlasHandle();
THCublasCheck(cublasDgemmStridedBatched(handle,
opa, opb, (int)m, (int)n, (int)k,
&alpha, a, (int)lda, strideA, b, (int)ldb, strideB, &beta, c, (int)ldc, strideC,
(int)batchCount));
}
|
cb30b4be9a4ff9416e19adddcf5ec53dc5bde982.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Programming Massively Parallel Processors - 3ed
* Chapter 3
* In this chapter the blur and color_to_grayscale functions are presented
* The "nvixnu__" libraries used here are available at https://gist.github.com/nvixnu.
*
* Created on: 30/11/2020
* Author: Nvixnu
*/
#include <stdio.h>
#include <time.h>
#include "ch3__config.h"
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include "nvixnu__error_utils.h"
using namespace cv;
__host__
__device__
__attribute__((always_inline))
inline void blur_unit (uchar *input, uchar *output, const int blur_size, const int width, const int height, int row, int col){
int pix_val = 0;
int pixels = 0;
for(int blur_row = -blur_size; blur_row < blur_size+1; ++blur_row){
for(int blur_col = -blur_size; blur_col < blur_size+1; ++blur_col){
int cur_row = row + blur_row;
int cur_col = col + blur_col;
if(cur_row > -1 && cur_row < height && cur_col > -1 && cur_col < width){
pix_val += input[cur_row * width + cur_col];
pixels++;
}
}
}
output[row * width + col] = (uchar)(pix_val/pixels);
}
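// blur_unit is a (2*blur_size+1) x (2*blur_size+1) box filter: taps falling outside
// the image are skipped and the divisor `pixels` shrinks accordingly, so border
// pixels are averaged over the in-bounds neighbourhood only.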
__global__
void blur_kernel(uchar *input, uchar *output, const int blur_size, const int width, const int height){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if(col < width && row < height){
blur_unit(input, output, blur_size, width, height, row, col);
}
}
void ch3__blur_device(uchar *h_input, uchar *h_output, const int blur_size, const int width, const int height, kernel_config_t config){
uchar *d_input, *d_output;
const int length = width*height;
CCE(hipMalloc(&d_input, length*sizeof(uchar)));
CCE(hipMalloc(&d_output, length*sizeof(uchar)));
CCE(hipMemcpy(d_input, h_input, length*sizeof(uchar), hipMemcpyHostToDevice));
dim3 block_dim(config.block_dim.x, config.block_dim.y, 1);
dim3 grid_dim(ceil(width/(double)config.block_dim.x), ceil(height/(double)config.block_dim.y), 1);
DEVICE_TIC(0);
hipLaunchKernelGGL(( blur_kernel), dim3(grid_dim), dim3(block_dim), 0, 0, d_input, d_output, blur_size, width, height);
CCLE();
DEVICE_TOC(0);
CCE(hipMemcpy(h_output, d_output, length*sizeof(uchar), hipMemcpyDeviceToHost));
CCE(hipFree(d_input));
CCE(hipFree(d_output));
}
void ch3__blur_host(uchar *input, uchar *output, const int blur_size, const int width, const int height){
HOST_TIC(0);
for(int row = 0; row < height; row++){
for(int col = 0; col < width; col++){
blur_unit(input, output, blur_size, width, height, row, col);
}
}
HOST_TOC(0);
}
void ch3__blur(env_e env, kernel_config_t config){
// reads the image file
Mat src = imread(CH3__INPUT_FILE_BLUR, IMREAD_GRAYSCALE);
// gets the total number of pixels
int length = src.rows*src.cols; //Or src.elemSize() * src.total()
// Pointers to pixel arrays
uchar *input, *output;
const char * output_filename;
//Check if the image can be read
if(src.empty()){
printf("Could not read the image!\n");
return;
}
//Allocates the input and output pixel arrays
input = (uchar *)malloc(length);
output = (uchar *)malloc(length);
//Copy the pixels from image to the input array
memcpy(input, src.data, length);
//Launch the blur function
if(env == Host){
ch3__blur_host(input, output, CH3__BLUR_WIDTH, src.cols, src.rows);
output_filename = CH3__OUTPUT_HOST_FILE_BLUR;
}else{
ch3__blur_device(input, output, CH3__BLUR_WIDTH, src.cols, src.rows, config);
output_filename = CH3__OUTPUT_DEVICE_FILE_BLUR;
}
//Copy the output pixel array to a destination Mat object
Mat dst(src.rows, src.cols, CV_8UC1, output);
// Save the blurred image to the output file
imwrite(output_filename, dst);
return;
}
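// Note: dst wraps the heap-allocated output buffer without copying it, and neither
// input nor output is freed before returning; acceptable for this one-shot benchmark,
// but a leak if ch3__blur were called repeatedly.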
int main(){
printf("Chapter 03\n");
printf("Input: %s\n", CH3__INPUT_FILE_BLUR);
printf("Device output: %s\n", CH3__OUTPUT_DEVICE_FILE_BLUR);
printf("Host output: %s\n", CH3__OUTPUT_HOST_FILE_BLUR);
printf("\n_____ blur _____\n\n");
printf("Running on Device with 256 threads per block...");
ch3__blur(Device, {.block_dim = {16,16,1}});
printf("\nRunning on Device with 1024 threads per block...");
ch3__blur(Device, {.block_dim = {32,32,1}});
printf("\n_____ blur_CPU _____\n");
ch3__blur(Host, {});
return 0;
}
|
cb30b4be9a4ff9416e19adddcf5ec53dc5bde982.cu
|
/*
* Programming Massively Parallel Processors - 3ed
* Chapter 3
* In this chapter the blur and color_to_grayscale functions are presented
* The "nvixnu__" libraries used here are available at https://gist.github.com/nvixnu.
*
* Created on: 30/11/2020
* Author: Nvixnu
*/
#include <stdio.h>
#include <time.h>
#include "ch3__config.h"
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include "nvixnu__error_utils.h"
using namespace cv;
__host__
__device__
__attribute__((always_inline))
inline void blur_unit (uchar *input, uchar *output, const int blur_size, const int width, const int height, int row, int col){
int pix_val = 0;
int pixels = 0;
for(int blur_row = -blur_size; blur_row < blur_size+1; ++blur_row){
for(int blur_col = -blur_size; blur_col < blur_size+1; ++blur_col){
int cur_row = row + blur_row;
int cur_col = col + blur_col;
if(cur_row > -1 && cur_row < height && cur_col > -1 && cur_col < width){
pix_val += input[cur_row * width + cur_col];
pixels++;
}
}
}
output[row * width + col] = (uchar)(pix_val/pixels);
}
__global__
void blur_kernel(uchar *input, uchar *output, const int blur_size, const int width, const int height){
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if(col < width && row < height){
blur_unit(input, output, blur_size, width, height, row, col);
}
}
void ch3__blur_device(uchar *h_input, uchar *h_output, const int blur_size, const int width, const int height, kernel_config_t config){
uchar *d_input, *d_output;
const int length = width*height;
CCE(cudaMalloc(&d_input, length*sizeof(uchar)));
CCE(cudaMalloc(&d_output, length*sizeof(uchar)));
CCE(cudaMemcpy(d_input, h_input, length*sizeof(uchar), cudaMemcpyHostToDevice));
dim3 block_dim(config.block_dim.x, config.block_dim.y, 1);
dim3 grid_dim(ceil(width/(double)config.block_dim.x), ceil(height/(double)config.block_dim.y), 1);
DEVICE_TIC(0);
blur_kernel<<<grid_dim, block_dim>>>(d_input, d_output, blur_size, width, height);
CCLE();
DEVICE_TOC(0);
CCE(cudaMemcpy(h_output, d_output, length*sizeof(uchar), cudaMemcpyDeviceToHost));
CCE(cudaFree(d_input));
CCE(cudaFree(d_output));
}
void ch3__blur_host(uchar *input, uchar *output, const int blur_size, const int width, const int height){
HOST_TIC(0);
for(int row = 0; row < height; row++){
for(int col = 0; col < width; col++){
blur_unit(input, output, blur_size, width, height, row, col);
}
}
HOST_TOC(0);
}
void ch3__blur(env_e env, kernel_config_t config){
// reads the image file
Mat src = imread(CH3__INPUT_FILE_BLUR, IMREAD_GRAYSCALE);
// gets the total number of pixels
int length = src.rows*src.cols; //Or src.elemSize() * src.total()
// Pointers to pixel arrays
uchar *input, *output;
const char * output_filename;
//Check if the image can be read
if(src.empty()){
printf("Could not read the image!\n");
return;
}
//Allocates the input and output pixel arrays
input = (uchar *)malloc(length);
output = (uchar *)malloc(length);
//Copy the pixels from image to the input array
memcpy(input, src.data, length);
//Launch the blur function
if(env == Host){
ch3__blur_host(input, output, CH3__BLUR_WIDTH, src.cols, src.rows);
output_filename = CH3__OUTPUT_HOST_FILE_BLUR;
}else{
ch3__blur_device(input, output, CH3__BLUR_WIDTH, src.cols, src.rows, config);
output_filename = CH3__OUTPUT_DEVICE_FILE_BLUR;
}
//Copy the output pixel array to a destination Mat object
Mat dst(src.rows, src.cols, CV_8UC1, output);
// Save the blurred image to the output file
imwrite(output_filename, dst);
return;
}
int main(){
printf("Chapter 03\n");
printf("Input: %s\n", CH3__INPUT_FILE_BLUR);
printf("Device output: %s\n", CH3__OUTPUT_DEVICE_FILE_BLUR);
printf("Host output: %s\n", CH3__OUTPUT_HOST_FILE_BLUR);
printf("\n_____ blur _____\n\n");
printf("Running on Device with 256 threads per block...");
ch3__blur(Device, {.block_dim = {16,16,1}});
printf("\nRunning on Device with 1024 threads per block...");
ch3__blur(Device, {.block_dim = {32,32,1}});
printf("\n_____ blur_CPU _____\n");
ch3__blur(Host, {});
return 0;
}
|
099744f39eb1e653572acb207e88f8cb1c5d7d27.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* main.cpp
*
* Created on: Sep 4, 2018
* Author: hb4ch
*/
#include <cstdio>
#include <cstdlib>
#include <sys/stat.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "benchmark.hpp"
#include "misc.hpp"
#include "raid.hpp"
unsigned char original_key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, };
// Dummy key for testing
void test_raid(int argc, char ** argv) {
if (argc != 4) {
std::printf("Wrong usage.\n");
exit(0);
}
size_t bs = atoi(argv[2]);
bs *= 1024;
// KBs
int hq = atoi(argv[3]);
char * file_name = argv[1];
FILE * fp = fopen(file_name, "r");
if (!fp) {
std::printf("Error reading file.\n");
exit(0);
}
struct stat st;
stat(file_name, &st);
size_t file_size = st.st_size;
if (file_size % 128 != 0) {
std::printf("Your file needs padding to align with 16 bytes.\n");
exit(0);
}
//unsigned char * fb = (unsigned char * )malloc(file_size);
unsigned char * h_fb;
checkCudaErrors(hipHostMalloc((void ** )&h_fb, file_size));
fread(h_fb, 1, file_size, fp);
fclose(fp);
std::printf("File read successful.\n");
// File is now read in properly.
raid_benchmark rb(bs, file_size / bs, file_size, (uint8_t * )h_fb, hq);
rb.start();
hipFree(h_fb);
}
void test_aes(int argc, char ** argv) {
if (argc != 4) {
std::printf("Wrong usage.\n");
exit(0);
}
size_t bs = atoi(argv[2]);
bs *= 1024;
// KBs
int hq = atoi(argv[3]);
char * file_name = argv[1];
FILE * fp = fopen(file_name, "r");
if (!fp) {
std::printf("Error reading file.\n");
exit(0);
}
struct stat st;
stat(file_name, &st);
size_t file_size = st.st_size;
if (file_size % 128 != 0) {
std::printf("Your file needs padding to align with 16 bytes.\n");
exit(0);
}
//unsigned char * fb = (unsigned char * )malloc(file_size);
unsigned char * h_fb;
checkCudaErrors(hipHostMalloc((void ** )&h_fb, file_size));
fread(h_fb, 1, file_size, fp);
fclose(fp);
std::printf("File read successful.\n");
// File is now read in properly.
unsigned int * expanded_key = (unsigned int *) malloc(176);
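// 176 bytes = 44 round-key words of 4 bytes each, i.e. the full AES-128 expanded key
// (11 round keys of 16 bytes).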
CPU_KeyExpansion(original_key, expanded_key);
// Key is now expanded.
/*
std::printf("Expanded key: \n");
for(int i = 0; i < 44; i++) {
std::printf("%d ", expanded_key[i]);
if(i % 4 == 0)
std::printf("\n");
}
std::printf("\n");
*/
std::printf("Key is now expanded.\n");
aes_benchmark ab(expanded_key, bs, file_size / bs, file_size, h_fb, hq);
// bs (argv[2], converted to bytes above) is the batch size
ab.start();
// Here we go ...
free(expanded_key);
hipFree(h_fb);
}
int main(int argc, char ** argv) {
//test_aes(argc, argv);
test_raid(argc, argv);
return 0;
}
|
099744f39eb1e653572acb207e88f8cb1c5d7d27.cu
|
/*
* main.cpp
*
* Created on: Sep 4, 2018
* Author: hb4ch
*/
#include <cstdio>
#include <cstdlib>
#include <sys/stat.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "benchmark.hpp"
#include "misc.hpp"
#include "raid.hpp"
unsigned char original_key[16] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, };
// Dummy key for testing
void test_raid(int argc, char ** argv) {
if (argc != 4) {
std::printf("Wrong usage.\n");
exit(0);
}
size_t bs = atoi(argv[2]);
bs *= 1024;
// KBs
int hq = atoi(argv[3]);
char * file_name = argv[1];
FILE * fp = fopen(file_name, "r");
if (!fp) {
std::printf("Error reading file.\n");
exit(0);
}
struct stat st;
stat(file_name, &st);
size_t file_size = st.st_size;
if (file_size % 128 != 0) {
std::printf("Your file needs padding to align with 16 bytes.\n");
exit(0);
}
//unsigned char * fb = (unsigned char * )malloc(file_size);
unsigned char * h_fb;
checkCudaErrors(cudaMallocHost((void ** )&h_fb, file_size));
fread(h_fb, 1, file_size, fp);
fclose(fp);
std::printf("File read successful.\n");
// File is now read in properly.
raid_benchmark rb(bs, file_size / bs, file_size, (uint8_t * )h_fb, hq);
rb.start();
cudaFree(h_fb);
}
void test_aes(int argc, char ** argv) {
if (argc != 4) {
std::printf("Wrong usage.\n");
exit(0);
}
size_t bs = atoi(argv[2]);
bs *= 1024;
// KBs
int hq = atoi(argv[3]);
char * file_name = argv[1];
FILE * fp = fopen(file_name, "r");
if (!fp) {
std::printf("Error reading file.\n");
exit(0);
}
struct stat st;
stat(file_name, &st);
size_t file_size = st.st_size;
if (file_size % 128 != 0) {
std::printf("Your file needs padding to align with 16 bytes.\n");
exit(0);
}
//unsigned char * fb = (unsigned char * )malloc(file_size);
unsigned char * h_fb;
checkCudaErrors(cudaMallocHost((void ** )&h_fb, file_size));
fread(h_fb, 1, file_size, fp);
fclose(fp);
std::printf("File read successful.\n");
// File is now read in properly.
unsigned int * expanded_key = (unsigned int *) malloc(176);
CPU_KeyExpansion(original_key, expanded_key);
// Key is now expanded.
/*
std::printf("Expanded key: \n");
for(int i = 0; i < 44; i++) {
std::printf("%d ", expanded_key[i]);
if(i % 4 == 0)
std::printf("\n");
}
std::printf("\n");
*/
std::printf("Key is now expanded.\n");
aes_benchmark ab(expanded_key, bs, file_size / bs, file_size, h_fb, hq);
// bs (argv[2], converted to bytes above) is the batch size
ab.start();
// Here we go ...
free(expanded_key);
cudaFree(h_fb);
}
int main(int argc, char ** argv) {
//test_aes(argc, argv);
test_raid(argc, argv);
return 0;
}
|
3f2d1e0d6e37c9a137c081064ac1ac5e9c40a275.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUAPI.h"
#include "CUPOT.h"
#if ( defined GPU && defined GRAVITY )
// Poisson solver prototypes
#if ( POT_SCHEME == SOR )
#ifdef USE_PSOLVER_10TO14
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#else
__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#endif // #ifdef USE_PSOLVER_10TO14 ... else ...
#elif ( POT_SCHEME == MG )
__global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const real dh_Min, const int Max_Iter, const int NPre_Smooth,
const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme );
#endif // POT_SCHEME
// Gravity solver prototypes
#if ( MODEL == HYDRO )
__global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ],
const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ],
char g_DE_Array[][ PS1*PS1*PS1 ],
const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType,
const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint );
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real EtaDt, const real dh, const real Lambda, const bool ExtPot,
const double TimeNew );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
template <typename T> bool Mis_CompareRealValue( const T Input1, const T Input2, const char *comment, const bool Verbose );
// declare all device pointers
extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ];
extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ];
extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ];
extern real (*d_Flu_Array_G )[GRA_NIN][ PS1*PS1*PS1 ];
extern double (*d_Corner_Array_G)[3];
#if ( MODEL == HYDRO || MODEL == MHD )
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ];
extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ];
#else
static real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ] = NULL;
static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ] = NULL;
#endif
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_G)[ PS1*PS1*PS1 ];
#else
static char (*d_DE_Array_G)[ PS1*PS1*PS1 ] = NULL;
#endif
#endif // #if ( MODEL == HYDRO || MODEL == MHD )
extern hipStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_PoissonGravitySolver
// Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate
// the gravitational potential and/or advance the fluid variables by the gravitational
// acceleration for a group of patches
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : a. Use streams for the asynchronous memory copy between device and host
// b. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
//
// Parameter : h_Rho_Array : Host array storing the input density
// h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation
// h_Pot_Array_Out : Host array to store the output potential
// h_Flu_Array : Host array to store the fluid variables for the Gravity solver
// h_Corner_Array : Host array storing the physical corner coordinates of each patch
// h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY
// h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY
// h_DE_Array : Host array storing the dual-energy status (for both input and output)
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dt : Time interval to advance solution
// dh : Grid size
// SOR_Min_Iter : Minimum # of iterations for SOR
// SOR_Max_Iter : Maximum # of iterations for SOR
// SOR_Omega : Over-relaxation parameter
// MG_Max_Iter : Maximum number of iterations for multigrid
// MG_NPre_Smooth : Number of pre-smoothing steps for multigrid
// MG_NPost_Smooth : Number of post-smoothing steps for multigrid
// MG_Tolerated_Error : Maximum tolerated error for multigrid
// Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a)
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
// P5_Gradient : Use 5-points stencil to evaluate the potential gradient
// ELBDM_Eta : Particle mass / Planck constant in ELBDM
// ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM
// Poisson : true --> invoke the Poisson solver
// GraAcc : true --> invoke the Gravity solver
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
// GravityType : Types of gravity --> self-gravity, external gravity, both
// TimeNew : Physical time at the current step (for the external gravity solver)
// TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY)
// ExtPot : Add the external potential
// MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1))
//
// Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda
// Useless parameters in ELBDM : P5_Gradient
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT],
const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT],
real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT],
real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1],
const double h_Corner_Array[][3],
const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G],
const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1],
char h_DE_Array [][PS1][PS1][PS1],
const int NPatchGroup, const real dt, const real dh[], const int SOR_Min_Iter,
const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter,
const int MG_NPre_Smooth, const int MG_NPost_Smooth,
const real MG_Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta,
const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream,
const OptGravityType_t GravityType, const double TimeNew, const double TimeOld,
const bool ExtPot, const real MinEint )
{
// model-independent constants
# if ( POT_SCHEME == SOR )
const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z );
# elif ( POT_SCHEME == MG )
const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 );
# endif
const dim3 Gra_Block_Dim( PATCH_SIZE, PATCH_SIZE, GRA_BLOCK_SIZE_Z );
const int NPatch = NPatchGroup*8;
# if ( POT_SCHEME == SOR )
//###: COORD-FIX: use dh instead of dh[0]
const real Poi_Const = Poi_Coeff*dh[0]*dh[0];
const real SOR_Omega_6 = SOR_Omega/6.0;
# endif
// model-dependent constants
# if ( MODEL == HYDRO )
//###: COORD-FIX: use dh instead of dh[0]
const real Gra_Const = ( P5_Gradient ) ? -dt/(12.0*dh[0]) : -dt/(2.0*dh[0]);
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
const real ELBDM_EtaDt = ELBDM_Eta*dt;
# else
# error : ERROR : unsupported MODEL !!
# endif
// check
# if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 )
# warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !!
# endif
# ifdef GAMER_DEBUG
const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z;
// minimum number of threads for spatial interpolation
if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) )
Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n",
Poi_NThread, (POT_NXT-2)*(POT_NXT-2) );
// constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG"
# if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG )
if ( Poisson && Poi_NThread < 64 )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread );
# endif
// constraint in "CUPOT_PoissonSolver_SOR_16to18cube"
# if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 )
if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread,
RHO_NXT*RHO_NXT/2 );
# endif
if ( GraAcc )
{
if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" );
}
# ifdef UNSPLIT_GRAVITY
if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
{
if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" );
}
if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" );
if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" );
# endif
# ifdef DUAL_ENERGY
if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" );
if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" );
# endif
}
# endif // #ifdef GAMER_DEBUG
if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme );
# if ( COORDINATE == CARTESIAN )
if ( !Mis_CompareRealValue( dh[0], dh[1], NULL, false ) ||
!Mis_CompareRealValue( dh[0], dh[2], NULL, false ) )
Aux_Error( ERROR_INFO, "currently the Cartesian coordinates only work with cubic cells --> dh = (%20.14e, %20.14e, %20.14e) !!\n",
dh[0], dh[1], dh[2] );
# else
Aux_Error( ERROR_INFO, "non-Cartesian coordinates do not support %s() yet !!\n", __FUNCTION__ );
# endif
int *NPatch_per_Stream = new int [GPU_NStream];
int *Rho_MemSize = new int [GPU_NStream];
int *Pot_MemSize_In = new int [GPU_NStream];
int *Pot_MemSize_Out = new int [GPU_NStream];
int *Flu_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
# ifdef UNSPLIT_GRAVITY
int *Pot_USG_MemSize = new int [GPU_NStream];
int *Flu_USG_MemSize = new int [GPU_NStream];
# endif
# ifdef DUAL_ENERGY
int *DE_MemSize = new int [GPU_NStream];
# endif
// set the number of patches in each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatch/GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1];
}
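// Illustrative split (hypothetical numbers): NPatch = 26 with GPU_NStream = 4 gives
// 26/4 = 6 patches to streams 0-2 (offsets 0, 6, 12) and the remaining 26 - 18 = 8
// patches to the last stream.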
// set the size of data to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real);
Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real);
Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real);
Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN;
Corner_MemSize [s] = NPatch_per_Stream[s]*3 *sizeof(double);
# ifdef UNSPLIT_GRAVITY
Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real);
Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1);
# endif
# ifdef DUAL_ENERGY
DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char);
# endif
}
// a. copy data from host to device
//=========================================================================================
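// Note: d_Rho_Array_P, d_Pot_Array_P_In, etc. are pointers to whole per-patch arrays,
// so the "+ UsedPatch[s]" offsets below advance by complete patches and each stream
// transfers only the NPatch_per_Stream[s] patches assigned to it.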
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
if ( Poisson )
{
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s],
Rho_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s],
Pot_MemSize_In[s], hipMemcpyHostToDevice, Stream[s] ) );
}
if ( GraAcc )
{
if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson )
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s],
Pot_MemSize_Out[s], hipMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s],
Flu_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
# ifdef UNSPLIT_GRAVITY
if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
Pot_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( hipMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s],
Flu_USG_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( hipMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s],
DE_MemSize[s], hipMemcpyHostToDevice, Stream[s] ) );
# endif
} // if ( GraAcc )
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
// b1. Poisson solver
if ( Poisson )
{
# if ( POT_SCHEME == SOR )
# ifdef USE_PSOLVER_10TO14
hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_10to14cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] ,
d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
# else
hipLaunchKernelGGL(( CUPOT_PoissonSolver_SOR_16to18cube) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] ,
d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
# endif // #ifdef USE_PSOLVER_10TO14 ... else ...
# elif ( POT_SCHEME == MG )
//###: COORD-FIX: use dh instead of dh[0]
hipLaunchKernelGGL(( CUPOT_PoissonSolver_MG) , dim3(NPatch_per_Stream[s]), dim3(Poi_Block_Dim), 0, Stream[s] ,
d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
dh[0], MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error,
Poi_Coeff, IntScheme );
# else
# error : unsupported GPU Poisson solver
# endif // POT_SCHEME
} // if ( Poisson )
// b2. Gravity solver
if ( GraAcc )
{
# if ( MODEL == HYDRO )
//###: COORD-FIX: use dh instead of dh[0]
hipLaunchKernelGGL(( CUPOT_HydroGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] ,
d_Flu_Array_G + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
d_Corner_Array_G + UsedPatch[s],
d_Pot_Array_USG_G + UsedPatch[s],
d_Flu_Array_USG_G + UsedPatch[s],
d_DE_Array_G + UsedPatch[s],
Gra_Const, P5_Gradient, GravityType,
TimeNew, TimeOld, dt, dh[0], MinEint );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
//###: COORD-FIX: use dh instead of dh[0]
hipLaunchKernelGGL(( CUPOT_ELBDMGravitySolver) , dim3(NPatch_per_Stream[s]), dim3(Gra_Block_Dim), 0, Stream[s] ,
d_Flu_Array_G + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
d_Corner_Array_G + UsedPatch[s],
ELBDM_EtaDt, dh[0], ELBDM_Lambda, ExtPot, TimeNew );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
} // if ( GraAcc )
CUDA_CHECK_ERROR( hipGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
if ( Poisson )
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s],
Pot_MemSize_Out[s], hipMemcpyDeviceToHost, Stream[s] ) );
if ( GraAcc )
{
CUDA_CHECK_ERROR( hipMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s],
Flu_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) );
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( hipMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s],
DE_MemSize[s], hipMemcpyDeviceToHost, Stream[s] ) );
# endif
}
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] Rho_MemSize;
delete [] Pot_MemSize_In;
delete [] Pot_MemSize_Out;
delete [] Flu_MemSize;
delete [] Corner_MemSize;
delete [] UsedPatch;
# ifdef UNSPLIT_GRAVITY
delete [] Pot_USG_MemSize;
delete [] Flu_USG_MemSize;
# endif
# ifdef DUAL_ENERGY
delete [] DE_MemSize;
# endif
} // FUNCTION : CUAPI_Asyn_PoissonGravitySolver
#endif // #if ( defined GPU && defined GRAVITY )
|
3f2d1e0d6e37c9a137c081064ac1ac5e9c40a275.cu
|
#include "CUAPI.h"
#include "CUPOT.h"
#if ( defined GPU && defined GRAVITY )
// Poisson solver prototypes
#if ( POT_SCHEME == SOR )
#ifdef USE_PSOLVER_10TO14
__global__ void CUPOT_PoissonSolver_SOR_10to14cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#else
__global__ void CUPOT_PoissonSolver_SOR_16to18cube( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const int Min_Iter, const int Max_Iter, const real Omega_6,
const real Const, const IntScheme_t IntScheme );
#endif // #ifdef USE_PSOLVER_10TO14 ... else ...
#elif ( POT_SCHEME == MG )
__global__ void CUPOT_PoissonSolver_MG( const real g_Rho_Array [][ RHO_NXT*RHO_NXT*RHO_NXT ],
const real g_Pot_Array_In [][ POT_NXT*POT_NXT*POT_NXT ],
real g_Pot_Array_Out[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const real dh_Min, const int Max_Iter, const int NPre_Smooth,
const int NPost_Smooth, const real Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme );
#endif // POT_SCHEME
// Gravity solver prototypes
#if ( MODEL == HYDRO )
__global__ void CUPOT_HydroGravitySolver( real g_Flu_Array_New[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array_New[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real g_Pot_Array_USG[][ USG_NXT_G*USG_NXT_G*USG_NXT_G ],
const real g_Flu_Array_USG[][GRA_NIN-1][ PS1*PS1*PS1 ],
char g_DE_Array[][ PS1*PS1*PS1 ],
const real Gra_Const, const bool P5_Gradient, const OptGravityType_t GravityType,
const double TimeNew, const double TimeOld, const real dt, const real dh, const real MinEint );
#elif ( MODEL == MHD )
#warning : WAIT MHD !!!
#elif ( MODEL == ELBDM )
__global__ void CUPOT_ELBDMGravitySolver( real g_Flu_Array[][GRA_NIN][ PS1*PS1*PS1 ],
const real g_Pot_Array[][ GRA_NXT*GRA_NXT*GRA_NXT ],
const double g_Corner_Array[][3],
const real EtaDt, const real dh, const real Lambda, const bool ExtPot,
const double TimeNew );
#else
#error : ERROR : unsupported MODEL !!
#endif // MODEL
template <typename T> bool Mis_CompareRealValue( const T Input1, const T Input2, const char *comment, const bool Verbose );
// declare all device pointers
extern real (*d_Rho_Array_P )[ RHO_NXT*RHO_NXT*RHO_NXT ];
extern real (*d_Pot_Array_P_In )[ POT_NXT*POT_NXT*POT_NXT ];
extern real (*d_Pot_Array_P_Out)[ GRA_NXT*GRA_NXT*GRA_NXT ];
extern real (*d_Flu_Array_G )[GRA_NIN][ PS1*PS1*PS1 ];
extern double (*d_Corner_Array_G)[3];
#if ( MODEL == HYDRO || MODEL == MHD )
#ifdef UNSPLIT_GRAVITY
extern real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ];
extern real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ];
#else
static real (*d_Pot_Array_USG_G)[ USG_NXT_G*USG_NXT_G*USG_NXT_G ] = NULL;
static real (*d_Flu_Array_USG_G)[GRA_NIN-1][ PS1*PS1*PS1 ] = NULL;
#endif
#ifdef DUAL_ENERGY
extern char (*d_DE_Array_G)[ PS1*PS1*PS1 ];
#else
static char (*d_DE_Array_G)[ PS1*PS1*PS1 ] = NULL;
#endif
#endif // #if ( MODEL == HYDRO || MODEL == MHD )
extern cudaStream_t *Stream;
//-------------------------------------------------------------------------------------------------------
// Function : CUAPI_Asyn_PoissonGravitySolver
// Description : Invoke the CUPOT_PoissonSolver_XXtoXXcube and/or CUPOT_GravitySolver kernel(s) to evaluate
// the gravitational potential and/or advance the fluid variables by the gravitational
// acceleration for a group of patches
//
// ***********************************************************
// ** Asynchronous Function **
// ** **
// ** will return before the execution in GPU is complete **
// ***********************************************************
//
// Note : a. Use streams for the asynchronous memory copy between device and host
// b. Prefix "d" : for pointers pointing to the "Device" memory space
// Prefix "h" : for pointers pointing to the "Host" memory space
//
// Parameter : h_Rho_Array : Host array storing the input density
// h_Pot_Array_In : Host array storing the input "coarse-grid" potential for interpolation
// h_Pot_Array_Out : Host array to store the output potential
// h_Flu_Array : Host array to store the fluid variables for the Gravity solver
// h_Corner_Array : Host array storing the physical corner coordinates of each patch
// h_Pot_Array_USG : Host array storing the prepared potential for UNSPLIT_GRAVITY
// h_Flu_Array_USG : Host array storing the prepared density + momentum for UNSPLIT_GRAVITY
// h_DE_Array : Host array storing the dual-energy status (for both input and output)
// NPatchGroup : Number of patch groups evaluated simultaneously by GPU
// dt : Time interval to advance solution
// dh : Grid size
// SOR_Min_Iter : Minimum # of iterations for SOR
// SOR_Max_Iter : Maximum # of iterations for SOR
// SOR_Omega : Over-relaxation parameter
// MG_Max_Iter : Maximum number of iterations for multigrid
// MG_NPre_Smooth : Number of pre-smoothing steps for multigrid
// MG_NPost_Smooth : Number of post-smoothing steps for multigrid
// MG_Tolerated_Error : Maximum tolerated error for multigrid
// Poi_Coeff : Coefficient in front of density in the Poisson equation (4*Pi*Newton_G*a)
// IntScheme : Interpolation scheme for potential
// --> currently supported schemes include
// INT_CQUAD : conservative quadratic interpolation
// INT_QUAD : quadratic interpolation
// P5_Gradient : Use 5-points stencil to evaluate the potential gradient
// ELBDM_Eta : Particle mass / Planck constant in ELBDM
// ELBDM_Lambda : Quartic self-interaction coefficient in ELBDM
// Poisson : true --> invoke the Poisson solver
// GraAcc : true --> invoke the Gravity solver
// GPU_NStream : Number of CUDA streams for the asynchronous memory copy
// GravityType : Types of gravity --> self-gravity, external gravity, both
// TimeNew : Physical time at the current step (for the external gravity solver)
// TimeOld : Physical time at the previous step (for the external gravity solver in UNSPLIT_GRAVITY)
// ExtPot : Add the external potential
// MinEint : Minimum allowed internal energy (== MIN_PRES / (GAMMA-1))
//
// Useless parameters in HYDRO : ELBDM_Eta, ELBDM_Lambda
// Useless parameters in ELBDM : P5_Gradient
//-------------------------------------------------------------------------------------------------------
void CUAPI_Asyn_PoissonGravitySolver( const real h_Rho_Array [][RHO_NXT][RHO_NXT][RHO_NXT],
const real h_Pot_Array_In [][POT_NXT][POT_NXT][POT_NXT],
real h_Pot_Array_Out[][GRA_NXT][GRA_NXT][GRA_NXT],
real h_Flu_Array [][GRA_NIN][PS1][PS1][PS1],
const double h_Corner_Array[][3],
const real h_Pot_Array_USG[][USG_NXT_G][USG_NXT_G][USG_NXT_G],
const real h_Flu_Array_USG[][GRA_NIN-1][PS1][PS1][PS1],
char h_DE_Array [][PS1][PS1][PS1],
const int NPatchGroup, const real dt, const real dh[], const int SOR_Min_Iter,
const int SOR_Max_Iter, const real SOR_Omega, const int MG_Max_Iter,
const int MG_NPre_Smooth, const int MG_NPost_Smooth,
const real MG_Tolerated_Error, const real Poi_Coeff,
const IntScheme_t IntScheme, const bool P5_Gradient, const real ELBDM_Eta,
const real ELBDM_Lambda, const bool Poisson, const bool GraAcc, const int GPU_NStream,
const OptGravityType_t GravityType, const double TimeNew, const double TimeOld,
const bool ExtPot, const real MinEint )
{
// model-independent constants
# if ( POT_SCHEME == SOR )
const dim3 Poi_Block_Dim( RHO_NXT/2, RHO_NXT, POT_BLOCK_SIZE_Z );
# elif ( POT_SCHEME == MG )
const dim3 Poi_Block_Dim( POT_BLOCK_SIZE_X, 1, 1 );
# endif
const dim3 Gra_Block_Dim( PATCH_SIZE, PATCH_SIZE, GRA_BLOCK_SIZE_Z );
const int NPatch = NPatchGroup*8;
# if ( POT_SCHEME == SOR )
//###: COORD-FIX: use dh instead of dh[0]
const real Poi_Const = Poi_Coeff*dh[0]*dh[0];
const real SOR_Omega_6 = SOR_Omega/6.0;
# endif
// model-dependent constants
# if ( MODEL == HYDRO )
//###: COORD-FIX: use dh instead of dh[0]
const real Gra_Const = ( P5_Gradient ) ? -dt/(12.0*dh[0]) : -dt/(2.0*dh[0]);
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
const real ELBDM_EtaDt = ELBDM_Eta*dt;
# else
# error : ERROR : unsupported MODEL !!
# endif
// check
# if ( MODEL == ELBDM && !defined STORE_POT_GHOST && GRA_GHOST_SIZE != 0 )
# warning : WARNING : GRA_GHOST_SIZE != 0 in ELBDM (without STORE_POT_GHOST) !!
# endif
# ifdef GAMER_DEBUG
const int Poi_NThread = Poi_Block_Dim.x * Poi_Block_Dim.y * Poi_Block_Dim.z;
// minimum number of threads for spatial interpolation
if ( Poisson && Poi_NThread < (POT_NXT-2)*(POT_NXT-2) )
Aux_Error( ERROR_INFO, "Poi_NThread (%d) < (POT_NXT-2)*(POT_NXT-2) (%d) !!\n",
Poi_NThread, (POT_NXT-2)*(POT_NXT-2) );
// constraint due to the reduction operation in "CUPOT_Poisson_10to14cube" and "CUPOT_PoissonSolver_MG"
# if ( ( POT_SCHEME == SOR && defined USE_PSOLVER_10TO14 ) || POT_SCHEME == MG )
if ( Poisson && Poi_NThread < 64 )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must >= 64) !!\n", "Poi_NThread", Poi_NThread );
# endif
// constraint in "CUPOT_PoissonSolver_SOR_16to18cube"
# if ( POT_SCHEME == SOR && !defined USE_PSOLVER_10TO14 )
if ( Poisson && Poi_NThread != RHO_NXT*RHO_NXT/2 )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d (must == %d) !!\n", "Poi_NThread", Poi_NThread,
RHO_NXT*RHO_NXT/2 );
# endif
if ( GraAcc )
{
if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
{
if ( h_Corner_Array == NULL ) Aux_Error( ERROR_INFO, "h_Corner_Array == NULL !!\n" );
if ( d_Corner_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_Corner_Array_G == NULL !!\n" );
}
# ifdef UNSPLIT_GRAVITY
if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
{
if ( h_Pot_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Pot_Array_USG == NULL !!\n" );
if ( d_Pot_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Pot_Array_USG_G == NULL !!\n" );
}
if ( h_Flu_Array_USG == NULL ) Aux_Error( ERROR_INFO, "h_Flu_Array_USG == NULL !!\n" );
if ( d_Flu_Array_USG_G == NULL ) Aux_Error( ERROR_INFO, "d_Flu_Array_USG_G == NULL !!\n" );
# endif
# ifdef DUAL_ENERGY
if ( h_DE_Array == NULL ) Aux_Error( ERROR_INFO, "h_DE_Array == NULL !!\n" );
if ( d_DE_Array_G == NULL ) Aux_Error( ERROR_INFO, "d_DE_Array_G == NULL !!\n" );
# endif
}
# endif // #ifdef GAMER_DEBUG
if ( Poisson && ( IntScheme != INT_CQUAD && IntScheme != INT_QUAD ) )
Aux_Error( ERROR_INFO, "incorrect parameter %s = %d !!\n", "IntScheme", IntScheme );
# if ( COORDINATE == CARTESIAN )
if ( !Mis_CompareRealValue( dh[0], dh[1], NULL, false ) ||
!Mis_CompareRealValue( dh[0], dh[2], NULL, false ) )
Aux_Error( ERROR_INFO, "currently the Cartesian coordinates only work with cubic cells --> dh = (%20.14e, %20.14e, %20.14e) !!\n",
dh[0], dh[1], dh[2] );
# else
Aux_Error( ERROR_INFO, "non-Cartesian coordinates do not support %s() yet !!\n", __FUNCTION__ );
# endif
int *NPatch_per_Stream = new int [GPU_NStream];
int *Rho_MemSize = new int [GPU_NStream];
int *Pot_MemSize_In = new int [GPU_NStream];
int *Pot_MemSize_Out = new int [GPU_NStream];
int *Flu_MemSize = new int [GPU_NStream];
int *Corner_MemSize = new int [GPU_NStream];
int *UsedPatch = new int [GPU_NStream];
# ifdef UNSPLIT_GRAVITY
int *Pot_USG_MemSize = new int [GPU_NStream];
int *Flu_USG_MemSize = new int [GPU_NStream];
# endif
# ifdef DUAL_ENERGY
int *DE_MemSize = new int [GPU_NStream];
# endif
// set the number of patches in each stream
UsedPatch[0] = 0;
if ( GPU_NStream == 1 ) NPatch_per_Stream[0] = NPatch;
else
{
for (int s=0; s<GPU_NStream-1; s++)
{
NPatch_per_Stream[s] = NPatch/GPU_NStream;
UsedPatch[s+1] = UsedPatch[s] + NPatch_per_Stream[s];
}
NPatch_per_Stream[GPU_NStream-1] = NPatch - UsedPatch[GPU_NStream-1];
}
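// Illustrative split (hypothetical numbers): NPatch = 26 with GPU_NStream = 4 gives
// 26/4 = 6 patches to streams 0-2 (offsets 0, 6, 12) and the remaining 26 - 18 = 8
// patches to the last stream.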
// set the size of data to be transferred into GPU in each stream
for (int s=0; s<GPU_NStream; s++)
{
Rho_MemSize [s] = NPatch_per_Stream[s]*CUBE(RHO_NXT )*sizeof(real);
Pot_MemSize_In [s] = NPatch_per_Stream[s]*CUBE(POT_NXT )*sizeof(real);
Pot_MemSize_Out[s] = NPatch_per_Stream[s]*CUBE(GRA_NXT )*sizeof(real);
Flu_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*GRA_NIN;
Corner_MemSize [s] = NPatch_per_Stream[s]*3 *sizeof(double);
# ifdef UNSPLIT_GRAVITY
Pot_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(USG_NXT_G)*sizeof(real);
Flu_USG_MemSize[s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(real)*(GRA_NIN-1);
# endif
# ifdef DUAL_ENERGY
DE_MemSize [s] = NPatch_per_Stream[s]*CUBE(PS1 )*sizeof(char);
# endif
}
// a. copy data from host to device
//=========================================================================================
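// Note: d_Rho_Array_P, d_Pot_Array_P_In, etc. are pointers to whole per-patch arrays,
// so the "+ UsedPatch[s]" offsets below advance by complete patches and each stream
// transfers only the NPatch_per_Stream[s] patches assigned to it.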
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
if ( Poisson )
{
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Rho_Array_P + UsedPatch[s], h_Rho_Array + UsedPatch[s],
Rho_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_In + UsedPatch[s], h_Pot_Array_In + UsedPatch[s],
Pot_MemSize_In[s], cudaMemcpyHostToDevice, Stream[s] ) );
}
if ( GraAcc )
{
if ( ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH ) && !Poisson )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_P_Out + UsedPatch[s], h_Pot_Array_Out + UsedPatch[s],
Pot_MemSize_Out[s], cudaMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_G + UsedPatch[s], h_Flu_Array + UsedPatch[s],
Flu_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
if ( GravityType == GRAVITY_EXTERNAL || GravityType == GRAVITY_BOTH || ExtPot )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Corner_Array_G + UsedPatch[s], h_Corner_Array + UsedPatch[s],
Corner_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# ifdef UNSPLIT_GRAVITY
if ( GravityType == GRAVITY_SELF || GravityType == GRAVITY_BOTH )
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Pot_Array_USG_G + UsedPatch[s], h_Pot_Array_USG + UsedPatch[s],
Pot_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_Flu_Array_USG_G + UsedPatch[s], h_Flu_Array_USG + UsedPatch[s],
Flu_USG_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMemcpyAsync( d_DE_Array_G + UsedPatch[s], h_DE_Array + UsedPatch[s],
DE_MemSize[s], cudaMemcpyHostToDevice, Stream[s] ) );
# endif
} // if ( GraAcc )
} // for (int s=0; s<GPU_NStream; s++)
// b. execute the kernel
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
// b1. Poisson solver
if ( Poisson )
{
# if ( POT_SCHEME == SOR )
# ifdef USE_PSOLVER_10TO14
CUPOT_PoissonSolver_SOR_10to14cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
( d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
# else
CUPOT_PoissonSolver_SOR_16to18cube <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
( d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
SOR_Min_Iter, SOR_Max_Iter, SOR_Omega_6, Poi_Const, IntScheme );
# endif // #ifdef USE_PSOLVER_10TO14 ... else ...
# elif ( POT_SCHEME == MG )
//###: COORD-FIX: use dh instead of dh[0]
CUPOT_PoissonSolver_MG <<< NPatch_per_Stream[s], Poi_Block_Dim, 0, Stream[s] >>>
( d_Rho_Array_P + UsedPatch[s],
d_Pot_Array_P_In + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
dh[0], MG_Max_Iter, MG_NPre_Smooth, MG_NPost_Smooth, MG_Tolerated_Error,
Poi_Coeff, IntScheme );
# else
# error : unsupported GPU Poisson solver
# endif // POT_SCHEME
} // if ( Poisson )
// b2. Gravity solver
if ( GraAcc )
{
# if ( MODEL == HYDRO )
//###: COORD-FIX: use dh instead of dh[0]
CUPOT_HydroGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>>
( d_Flu_Array_G + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
d_Corner_Array_G + UsedPatch[s],
d_Pot_Array_USG_G + UsedPatch[s],
d_Flu_Array_USG_G + UsedPatch[s],
d_DE_Array_G + UsedPatch[s],
Gra_Const, P5_Gradient, GravityType,
TimeNew, TimeOld, dt, dh[0], MinEint );
# elif ( MODEL == MHD )
# warning : WAIT MHD !!!
# elif ( MODEL == ELBDM )
//###: COORD-FIX: use dh instead of dh[0]
CUPOT_ELBDMGravitySolver <<< NPatch_per_Stream[s], Gra_Block_Dim, 0, Stream[s] >>>
( d_Flu_Array_G + UsedPatch[s],
d_Pot_Array_P_Out + UsedPatch[s],
d_Corner_Array_G + UsedPatch[s],
ELBDM_EtaDt, dh[0], ELBDM_Lambda, ExtPot, TimeNew );
# else
# error : ERROR : unsupported MODEL !!
# endif // MODEL
} // if ( GraAcc )
CUDA_CHECK_ERROR( cudaGetLastError() );
} // for (int s=0; s<GPU_NStream; s++)
// c. copy data from device to host
//=========================================================================================
for (int s=0; s<GPU_NStream; s++)
{
if ( NPatch_per_Stream[s] == 0 ) continue;
if ( Poisson )
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Pot_Array_Out + UsedPatch[s], d_Pot_Array_P_Out + UsedPatch[s],
Pot_MemSize_Out[s], cudaMemcpyDeviceToHost, Stream[s] ) );
if ( GraAcc )
{
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_Flu_Array + UsedPatch[s], d_Flu_Array_G + UsedPatch[s],
Flu_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# ifdef DUAL_ENERGY
CUDA_CHECK_ERROR( cudaMemcpyAsync( h_DE_Array + UsedPatch[s], d_DE_Array_G + UsedPatch[s],
DE_MemSize[s], cudaMemcpyDeviceToHost, Stream[s] ) );
# endif
}
} // for (int s=0; s<GPU_NStream; s++)
delete [] NPatch_per_Stream;
delete [] Rho_MemSize;
delete [] Pot_MemSize_In;
delete [] Pot_MemSize_Out;
delete [] Flu_MemSize;
delete [] Corner_MemSize;
delete [] UsedPatch;
# ifdef UNSPLIT_GRAVITY
delete [] Pot_USG_MemSize;
delete [] Flu_USG_MemSize;
# endif
# ifdef DUAL_ENERGY
delete [] DE_MemSize;
# endif
} // FUNCTION : CUAPI_Asyn_PoissonGravitySolver
#endif // #if ( defined GPU && defined GRAVITY )
|
ca8514b78c6df85f49d38b48ad5f191b51391b63.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__device__ int roundToInt(float val)
{
return (int)floor(val + 0.5f);
}
__device__ float d_priorF; __global__ void add(float *p, float *q) { *p += *q; }
__global__ void composeSingleSystem(const size_t offset, const float *H, const size_t lowresWidth, const size_t lowresHeight, const size_t highresWidth, const size_t highresHeight, const float psfWidth, const int pixelRadius, float *systemMatrixVals, int *systemMatrixCols, int *systemMatrixRows)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
const size_t lowresPixels = lowresWidth * lowresHeight;
if (idx >= lowresPixels)
return;
// Coordinates of this thread in the low-res image
size_t x = idx % lowresWidth;
size_t y = idx / lowresWidth;
// Row that this thread writes in the full system matrix
size_t r = idx + offset;
// Transform pixel coordinates from the LR grid to the desired HR grid
float hrx, hry;
float zoom = float(highresWidth) / float(lowresWidth);
hrx = (H[0] * x + H[1] * y + H[2]) * zoom;
hry = (H[3] * x + H[4] * y + H[5]) * zoom;
float weightSum = 0.0f;
const size_t maxRowElems = (2 * pixelRadius + 1) * (2 * pixelRadius + 1);
size_t offsetCRS = 0;
size_t offsetRows = maxRowElems * r;
// Iterate over the neighborhood defined by the width of the psf
for (int offsetY = -pixelRadius; offsetY <= pixelRadius; ++offsetY)
{
const int ny = roundToInt(hry + offsetY);
if (ny < 0 || ny >= highresHeight)
continue;
for (int offsetX = -pixelRadius; offsetX <= pixelRadius; ++offsetX)
{
const int nx = roundToInt(hrx + offsetX);
if (nx < 0 || nx >= highresWidth)
continue;
const float dx = hrx - float(nx);
const float dy = hry - float(ny);
// Compute influence of current high-res pixel for
// this thread's low-res pixel
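// The displacement (dx,dy) is mapped through the affine part of H to get an
// (approximately) source-space squared distance; the Gaussian weight below decays
// with psfWidth scaled by the zoom factor and is normalized per row further down.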
float dist = dx*dx*H[0]*H[0] + dy*dy*H[4]*H[4] +
dx*dy*H[0]*H[3] + dx*dy*H[1]*H[4];
float weight = expf(-dist / (2.0f * zoom * zoom * psfWidth * psfWidth));
const size_t valIdx = offsetRows + offsetCRS;
systemMatrixVals[valIdx] = weight;
systemMatrixCols[valIdx] = ny * highresWidth + nx;
weightSum += weight;
++offsetCRS;
}
}
if (weightSum > 0.0f)
{
// Normalize row sums
for (size_t i = 0; i < offsetCRS; ++i)
{
systemMatrixVals[offsetRows + i] /= weightSum;
}
}
// If we have saved less than maxRowElems elements,
// we have to pad the CRS structure with 0 entries
// to make sure it is valid
if (offsetCRS == 0)
{
systemMatrixVals[offsetRows] = 0.0f;
systemMatrixCols[offsetRows] = 0;
++offsetCRS;
}
bool copy = false;
// Try adding elements after the last saved entry
while (offsetCRS < maxRowElems)
{
const size_t idx = offsetRows + offsetCRS;
if (systemMatrixCols[idx - 1] + 1 >= highresWidth * highresHeight)
{
copy = true;
break;
}
systemMatrixVals[idx] = 0.0f;
systemMatrixCols[idx] = systemMatrixCols[idx - 1] + 1;
offsetCRS++;
}
// If there isn't enough space after the last saved
// entry, add padding before first entry
if (copy)
{
for (int idx = offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixVals[offsetRows + idx];
systemMatrixCols[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixCols[offsetRows + idx];
}
for (int idx = maxRowElems - offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + idx] = 0.0f;
systemMatrixCols[offsetRows + idx] = systemMatrixCols[offsetRows + idx + 1] - 1;
}
}
systemMatrixRows[r] = r * maxRowElems;
}
|
ca8514b78c6df85f49d38b48ad5f191b51391b63.cu
|
#include "includes.h"
__device__ int roundToInt(float val)
{
return (int)floor(val + 0.5f);
}
__device__ float d_priorF; __global__ void add(float *p, float *q) { *p += *q; }
__global__ void composeSingleSystem(const size_t offset, const float *H, const size_t lowresWidth, const size_t lowresHeight, const size_t highresWidth, const size_t highresHeight, const float psfWidth, const int pixelRadius, float *systemMatrixVals, int *systemMatrixCols, int *systemMatrixRows)
{
const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
const size_t lowresPixels = lowresWidth * lowresHeight;
if (idx >= lowresPixels)
return;
// Coordinates of this thread in the low-res image
size_t x = idx % lowresWidth;
size_t y = idx / lowresWidth;
// Row that this thread writes in the full system matrix
size_t r = idx + offset;
// Transform pixel coordinates from the LR grid to the desired HR grid
float hrx, hry;
float zoom = float(highresWidth) / float(lowresWidth);
hrx = (H[0] * x + H[1] * y + H[2]) * zoom;
hry = (H[3] * x + H[4] * y + H[5]) * zoom;
float weightSum = 0.0f;
const size_t maxRowElems = (2 * pixelRadius + 1) * (2 * pixelRadius + 1);
size_t offsetCRS = 0;
size_t offsetRows = maxRowElems * r;
// Iterate over the neighborhood defined by the width of the psf
for (int offsetY = -pixelRadius; offsetY <= pixelRadius; ++offsetY)
{
const int ny = roundToInt(hry + offsetY);
if (ny < 0 || ny >= highresHeight)
continue;
for (int offsetX = -pixelRadius; offsetX <= pixelRadius; ++offsetX)
{
const int nx = roundToInt(hrx + offsetX);
if (nx < 0 || nx >= highresWidth)
continue;
const float dx = hrx - float(nx);
const float dy = hry - float(ny);
// Compute influence of current high-res pixel for
// this thread's low-res pixel
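// The displacement (dx,dy) is mapped through the affine part of H to get an
// (approximately) source-space squared distance; the Gaussian weight below decays
// with psfWidth scaled by the zoom factor and is normalized per row further down.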
float dist = dx*dx*H[0]*H[0] + dy*dy*H[4]*H[4] +
dx*dy*H[0]*H[3] + dx*dy*H[1]*H[4];
float weight = expf(-dist / (2.0f * zoom * zoom * psfWidth * psfWidth));
const size_t valIdx = offsetRows + offsetCRS;
systemMatrixVals[valIdx] = weight;
systemMatrixCols[valIdx] = ny * highresWidth + nx;
weightSum += weight;
++offsetCRS;
}
}
if (weightSum > 0.0f)
{
// Normalize row sums
for (size_t i = 0; i < offsetCRS; ++i)
{
systemMatrixVals[offsetRows + i] /= weightSum;
}
}
// If we have saved less than maxRowElems elements,
// we have to pad the CRS structure with 0 entries
// to make sure it is valid
if (offsetCRS == 0)
{
systemMatrixVals[offsetRows] = 0.0f;
systemMatrixCols[offsetRows] = 0;
++offsetCRS;
}
bool copy = false;
// Try adding elements after the last saved entry
while (offsetCRS < maxRowElems)
{
const size_t idx = offsetRows + offsetCRS;
if (systemMatrixCols[idx - 1] + 1 >= highresWidth * highresHeight)
{
copy = true;
break;
}
systemMatrixVals[idx] = 0.0f;
systemMatrixCols[idx] = systemMatrixCols[idx - 1] + 1;
offsetCRS++;
}
// If there isn't enough space after the last saved
// entry, add padding before first entry
if (copy)
{
for (int idx = offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixVals[offsetRows + idx];
systemMatrixCols[offsetRows + maxRowElems - (offsetCRS - idx)] =
systemMatrixCols[offsetRows + idx];
}
for (int idx = maxRowElems - offsetCRS - 1; idx >= 0; --idx)
{
systemMatrixVals[offsetRows + idx] = 0.0f;
systemMatrixCols[offsetRows + idx] = systemMatrixCols[offsetRows + idx + 1] - 1;
}
}
systemMatrixRows[r] = r * maxRowElems;
}
|
34b048ca935b7994b447acfba832aa55762d1958.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "ARR_ADDC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
float *in1 = NULL;
hipMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
hipMalloc(&in2, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
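// iXSIZE/iYSIZE are rounded up to the next multiple of the block size so that the
// (iXSIZE/BLOCKX) x (iYSIZE/BLOCKY) grid covers the whole matrix; e.g. (illustrative)
// XSIZE = 240 with BLOCKX = 32 becomes iXSIZE = 256, i.e. 8 blocks along x.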
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(ARR_ADDC, dim3(gridBlock), dim3(threadBlock), 0, 0, result, in1, in2, N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(ARR_ADDC, dim3(gridBlock), dim3(threadBlock), 0, 0, result, in1, in2, N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(ARR_ADDC, dim3(gridBlock), dim3(threadBlock), 0, 0, result, in1, in2, N);
}
hipDeviceSynchronize(); // kernel launches are asynchronous, so wait for the queued launches to finish before stopping the timer
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
34b048ca935b7994b447acfba832aa55762d1958.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "ARR_ADDC.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
float *in1 = NULL;
cudaMalloc(&in1, XSIZE*YSIZE);
float *in2 = NULL;
cudaMalloc(&in2, XSIZE*YSIZE);
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
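// iXSIZE/iYSIZE are rounded up to the next multiple of the block size so that the
// (iXSIZE/BLOCKX) x (iYSIZE/BLOCKY) grid covers the whole matrix; e.g. (illustrative)
// XSIZE = 240 with BLOCKX = 32 becomes iXSIZE = 256, i.e. 8 blocks along x.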
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
ARR_ADDC<<<gridBlock,threadBlock>>>(result,in1,in2,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
ARR_ADDC<<<gridBlock,threadBlock>>>(result,in1,in2,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
ARR_ADDC<<<gridBlock,threadBlock>>>(result,in1,in2,N);
}
cudaDeviceSynchronize(); // kernel launches are asynchronous, so wait for the queued launches to finish before stopping the timer
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
ef2f5142005c0859cae8626b8b2f43ce6a3eac6f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*! \page Vector_1_gpu_first_step Vector 1 GPU first step
*
*
* [TOC]
*
*
* # GPU first steps # {#GPU_first_steps}
*
*
 * This example shows how to use GPUs in OpenFPM step by step. To start to use GPUs with vector_dist, this example is a good
* starting point. On the other hand we suggest to read the example \ref{simple_vector_example} before this example.
*
* ## Data structures {#GPU_data_structures}
*
 * While a CPU data-structure can be created with vector_dist, a GPU data-structure can be created with vector_dist_gpu.
 * The GPU vector_dist exposes the same CPU interface with additional functionalities. This means that a vector_dist can be
 * changed to vector_dist_gpu without changing a single line of code. This is an important feature because it gives us the possibility
 * to change our code from CPU to GPU incrementally, step by step. A small section of code can be moved to GPU leaving the rest
 * unchanged. The documentation of vector_dist_gpu is the same as vector_dist, the extended functionality is documented in
* vector_dist. Every file containing vector_dist_gpu must be compiled with nvcc compiler, so we suggest to use the extension
* *.cu for such files and add a rule to compile cu files using nvcc. An example of how to do it can be seen checking the Makefile
* in this example.
*
 * While changing from vector_dist to vector_dist_gpu seems not to produce any effect, there are some underlying changes that take
* effect:
*
* * The internal memory used to allocate memory is not anymore simple heap memory, but is CUDA pinned host memory.
* * Buffers are allocated for each property.
 * * Vector properties like float[3] and float[3][3] have a different layout in memory from the standard (it will be
* clear how and why later in the example)
*
* This code snippet below shows how vector_dist_gpu can be used like vector_dist in \ref{simple_vector_example}.
* In short we create a set of 100 particles (vector_dist_gpu) in 2d from {0.0,0.0} to {1.0,1.0}. Particles are
* randomly placed in such space. The final map redistribute such particles accordingly to the decomposition.
*
* \snippet Vector/1_gpu_first_step/main.cu cpu_like_gpu
*
* ### Offload data to GPU ###
*
* To offload data to GPU you can use the function hostToDevicePos to offload particle position and hostToDeviceProp to offload
 * properties data. The latter function takes template parameters to specify which properties to offload. Here we offload all
* the properties (scalar,vector,tensor)
*
* \snippet Vector/1_gpu_first_step/main.cu offload_pos_prop
*
 * Once the data has been offloaded we can launch a kernel to do some computation. Every GPU-ready data-structure has a function
 * called toKernel that gives the possibility to pass the data-structure to the kernel and use it inside the kernel, like it is
 * used on CPU. Launching a kernel on CUDA requires the subdivision of a loop in workgroups and threads in the workgroups.
 * OpenFPM provides the function getDomainIteratorGPU to automatically split the domain particle loop. The members wthr and
 * thr can be used in the <<<...>>> brackets to launch a CUDA kernel.
*
* \snippet Vector/1_gpu_first_step/main.cu launch_domain_it
*
* The kernel is the definition of a normal CUDA kernel. We use template parameters for every parameter that is passed with toKernel()
*
 * \note The definition of the arguments of toKernel() as template parameters gives us the possibility to use the template engine
 * to do type deduction and avoid specifying the real type returned by toKernel()
*
 * The kernel simply shifts the particles by 0.01. It sets the scalar property to the sum of x and y of the "old" particle position,
 * sets the vector property to the old particle position, and sets the tensor to several combinations of x and y of the "old" particle
* position
*
* \snippet Vector/1_gpu_first_step/main.cu kernel_translate_fill_prop
*
* Once the computation is completed we can ask to reoffload the data from device to host and write the results to file.
*
* \note Every file writer requires that the data are offloaded on host memory
*
* \snippet Vector/1_gpu_first_step/main.cu device_to_host_write
*
* \htmlonly
* <img src="http://openfpm.mpi-cbg.de/web/images/examples/1_gpu_first_step/output.png"/>
* \endhtmlonly
*
* ## map and ghost_get for multi GPU
*
* Until here we saw how to move data from host to device, device to host and how to launch a CUDA kernel on off-loaded data.
 * As previously mentioned vector_dist_gpu has the same CPU interface and so provides the standard functions map and ghost_get that work
 * on host pinned memory. Because we want to avoid moving data from GPU to host memory, we can use map with the option
 * RUN_ON_DEVICE to redistribute the particles directly on GPU, and ghost_get with RUN_ON_DEVICE to fill ghost particles directly on GPU.
 * In the loop below we see how we can use map on a particle set that is already on GPU. In particular we never offload particles to CPU
 * to do map or ghost_get. We use the kernel translate_fill_prop to translate the particles and update the properties. The only offload
 * happens every 10 time-steps to write on file.
*
* \snippet Vector/1_gpu_first_step/main.cu map_and_ghost_get_on_gpu
*
* ## RDMA on MPI with CUDA
*
 * Today MPI implementations are able to do RDMA on GPU memory. In practice this means that an Infiniband card can directly read
 * GPU memory, transfer it over Infiniband and write it on the other node directly on GPU, without moving the data to system memory.
 * This means that MPI calls can work directly on CUDA device pointers. OpenFPM can exploit this feature if MPI is compiled
 * with CUDA support. To check if MPI is compiled with CUDA support use the function \b is_mpi_rdma_cuda_active() \b
*
* \snippet Vector/1_gpu_first_step/main.cu performance_rdma
*
 * It is good to note that in order for this to work (return true), some conditions must be met.
 *
 * * Because at the moment OpenFPM only senses the OpenMPI CUDA-aware implementation, we must define the \b OPENMPI \b macro
* \snippet Vector/1_gpu_first_step/main.cu using_openmpi
*
* * MPI must be compiled with CUDA support (in general installing OpenFPM with -g should attempt to install OpenMPI with CUDA support)
*
* ## Full code ## {#code_e0_sim}
*
* \include Vector/1_gpu_first_step/main.cu
*
*/
#ifdef __NVCC__
//! \cond [using_openmpi] \endcond
#define OPENMPI
//! \cond [using_openmpi] \endcond
//#define SCAN_WITH_CUB <------ MODERNGPU is broken on RTX use CUB library for scan
//#define EXTERNAL_SET_GPU <----- In case you want to distribute the GPUs differently from the default
#include "Vector/vector_dist.hpp"
//! \cond [kernel_translate_fill_prop] \endcond
template<typename vector_type>
__global__ void translate_fill_prop(vector_type vd)
{
auto p = GET_PARTICLE(vd);
vd.template getProp<0>(p) = vd.getPos(p)[0] + vd.getPos(p)[1];
vd.template getProp<1>(p)[0] = vd.getPos(p)[0];
vd.template getProp<1>(p)[1] = vd.getPos(p)[1];
vd.template getProp<2>(p)[0][0] = vd.getPos(p)[0];
vd.template getProp<2>(p)[0][1] = vd.getPos(p)[1];
vd.template getProp<2>(p)[1][0] = vd.getPos(p)[0] + vd.getPos(p)[1];
vd.template getProp<2>(p)[1][1] = vd.getPos(p)[1] - vd.getPos(p)[0];
vd.getPos(p)[0] += 0.01f;
vd.getPos(p)[1] += 0.01f;
}
//! \cond [kernel_translate_fill_prop] \endcond
int main(int argc, char* argv[])
{
// OpenFPM GPU distribution
 // OpenFPM by default selects GPU 0 for process 0, GPU 1 for process 1 and so on ... . In case of multi-node it is the same: each node
 // has a group of processes and these groups of processes are distributed across the available GPUs on that node.
// If you want to override this behaviour use #define EXTERNAL_SET_GPU at the very beginning of the program and use
// hipSetDevice to select the GPU for that particular process before openfpm_init
 // Note: To get the process number do MPI_Init and use MPI_Comm_rank. VCluster is not available before openfpm_init
// A code snippet in case we want to skip GPU 0
// MPI_Init(&argc,&argv);
// int rank;
// MPI_Comm_rank(MPI_COMM_WORLD,&rank);
// hipSetDevice(1+rank);
//! \cond [cpu_like_gpu] \endcond
// initialize the library
openfpm_init(&argc,&argv);
// Here we define our domain a 2D box with internals from 0 to 1.0 for x and y
Box<2,float> domain({0.0,0.0},{1.0,1.0});
// Here we define the boundary conditions of our problem
size_t bc[2]={PERIODIC,PERIODIC};
// extended boundary around the domain, and the processor domain
Ghost<2,float> g(0.05);
vector_dist_gpu<2,float, aggregate<float,float[2],float[2][2]> > vd(100,domain,bc,g);
// the scalar is the element at position 0 in the aggregate
const int scalar = 0;
// the vector is the element at position 1 in the aggregate
const int vector = 1;
// the tensor is the element at position 2 in the aggregate
const int tensor = 2;
auto it = vd.getDomainIterator();
while (it.isNext())
{
auto key = it.get();
// we define x, assign a random position between 0.0 and 1.0
vd.getPos(key)[0] = (float)rand() / RAND_MAX;
// we define y, assign a random position between 0.0 and 1.0
vd.getPos(key)[1] = (float)rand() / RAND_MAX;
// next particle
++it;
}
vd.map();
//! \cond [cpu_like_gpu] \endcond
//! \cond [offload_pos_prop] \endcond
vd.hostToDevicePos();
vd.template hostToDeviceProp<scalar,vector,tensor>();
//! \cond [offload_pos_prop] \endcond
//! \cond [launch_domain_it] \endcond
auto ite = vd.getDomainIteratorGPU();
hipLaunchKernelGGL(( translate_fill_prop), dim3(ite.wthr),dim3(ite.thr), 0, 0, vd.toKernel());
//! \cond [launch_domain_it] \endcond
//! \cond [device_to_host_write] \endcond
vd.deviceToHostPos();
vd.deviceToHostProp<0,1,2>();
// We write on a file
vd.write("output");
//! \cond [device_to_host_write] \endcond
//! \cond [map_and_ghost_get_on_gpu] \endcond
for (int j = 0 ; j < 100 ; j++)
{
auto ite = vd.getDomainIteratorGPU();
hipLaunchKernelGGL(( translate_fill_prop), dim3(ite.wthr),dim3(ite.thr), 0, 0, vd.toKernel());
vd.map(RUN_ON_DEVICE);
vd.template ghost_get<0,1,2>(RUN_ON_DEVICE);
if ( j % 10 == 0)
{
// offload to host
vd.deviceToHostPos();
vd.template deviceToHostProp<0,1,2>();
// write
vd.write_frame("output_f",j);
}
}
//! \cond [map_and_ghost_get_on_gpu] \endcond
//! \cond [performance_rdma] \endcond
bool active = is_mpi_rdma_cuda_active();
std::cout << "Is MPI rdma active on CUDA " << active << std::endl;
//! \cond [performance_rdma] \endcond
openfpm_finalize();
}
#else
int main(int argc, char* argv[])
{
return 0;
}
#endif
|
ef2f5142005c0859cae8626b8b2f43ce6a3eac6f.cu
|
/*! \page Vector_1_gpu_first_step Vector 1 GPU first step
*
*
* [TOC]
*
*
* # GPU first steps # {#GPU_first_steps}
*
*
 * This example shows how to use GPUs in OpenFPM step by step. To start to use GPUs with vector_dist, this example is a good
* starting point. On the other hand we suggest to read the example \ref{simple_vector_example} before this example.
*
* ## Data structures {#GPU_data_structures}
*
 * While a CPU data-structure can be created with vector_dist, a GPU data-structure can be created with vector_dist_gpu.
 * The GPU vector_dist exposes the same CPU interface with additional functionalities. This means that a vector_dist can be
 * changed to vector_dist_gpu without changing a single line of code. This is an important feature because it gives us the possibility
 * to change our code from CPU to GPU incrementally, step by step. A small section of code can be moved to GPU leaving the rest
 * unchanged. The documentation of vector_dist_gpu is the same as vector_dist, the extended functionality is documented in
* vector_dist. Every file containing vector_dist_gpu must be compiled with nvcc compiler, so we suggest to use the extension
* *.cu for such files and add a rule to compile cu files using nvcc. An example of how to do it can be seen checking the Makefile
* in this example.
*
* Changing from vector_dist to vector_dist_gpu may seem to have no effect, but there are some underlying changes that take
* place:
*
* * The memory used internally is no longer plain heap memory, but CUDA pinned host memory.
* * Buffers are allocated for each property.
* * Vector properties like float[3] and float[3][3] have a different layout in memory from the standard one (see the sketch
* below; how and why will become clearer later in the example)
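*
* As a rough sketch of the idea (plain C++ below, not OpenFPM's actual internal containers), the components of such a
* property are stored contiguously across particles (structure of arrays) rather than particle by particle (array of
* structures), which favours coalesced memory access on the GPU:
*
* \code
* struct particle_aos { float v[2]; };                    // array of structures: v0x v0y v1x v1y ...
* struct storage_soa  { float x[100]; float y[100]; };    // structure of arrays: x0 x1 ... y0 y1 ...
* \endcode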
*
* The code snippet below shows how vector_dist_gpu can be used like vector_dist in \ref{simple_vector_example}.
* In short, we create a set of 100 particles (vector_dist_gpu) in 2D in the box from {0.0,0.0} to {1.0,1.0}. Particles are
* randomly placed in that space. The final map redistributes the particles according to the decomposition.
*
* \snippet Vector/1_gpu_first_step/main.cu cpu_like_gpu
*
* ### Offload data to GPU ###
*
* To offload data to the GPU you can use the function hostToDevicePos to offload the particle positions and hostToDeviceProp to offload
* the property data. The latter function takes template parameters to specify which properties to offload. Here we offload all
* the properties (scalar, vector, tensor)
*
* \snippet Vector/1_gpu_first_step/main.cu offload_pos_prop
*
* Once the data have been offloaded we can launch a kernel to do some computation. Every GPU-ready data-structure has a function
* called toKernel that makes it possible to pass the data-structure to a kernel and use it inside the kernel, just like it is
* used on the CPU. Launching a kernel in CUDA requires subdividing a loop into workgroups and threads within each workgroup.
* OpenFPM provides the function getDomainIteratorGPU to automatically split the domain particle loop. The members wthr and
* thr can be used in the <<<...>>> brackets to launch a CUDA kernel.
*
* \snippet Vector/1_gpu_first_step/main.cu launch_domain_it
*
* The kernel is defined like a normal CUDA kernel. We use a template parameter for every argument that is passed with toKernel()
*
* \note Declaring the toKernel() arguments as template parameters gives us the possibility to use the template engine
* for type deduction, so we avoid having to spell out the real type returned by toKernel()
*
* The kernel simply shifts the particles by 0.01. It sets the scalar property to the sum of x and y of the "old" particle position,
* sets the vector property to the old particle position, and sets the tensor to several combinations of x and y of the "old" particle
* position
*
* \snippet Vector/1_gpu_first_step/main.cu kernel_translate_fill_prop
*
* Once the computation is completed we can move the data back from device to host and write the results to file.
*
* \note Every file writer requires the data to be offloaded to host memory
*
* \snippet Vector/1_gpu_first_step/main.cu device_to_host_write
*
* \htmlonly
* <img src="http://openfpm.mpi-cbg.de/web/images/examples/1_gpu_first_step/output.png"/>
* \endhtmlonly
*
* ## map and ghost_get for multi GPU
*
* So far we have seen how to move data from host to device and from device to host, and how to launch a CUDA kernel on off-loaded data.
* As previously mentioned, vector_dist_gpu has the same interface as the CPU version and so provides the standard functions map and ghost_get, which work
* on host pinned memory. However, we want to avoid moving data from the GPU back to host memory. To avoid that we can use map with the option
* RUN_ON_DEVICE to redistribute the particles directly on the GPU, and ghost_get with RUN_ON_DEVICE to fill the ghost particles directly on the GPU.
* In the loop below we see how we can use map on a particle set that is already on the GPU. In particular we never offload particles to the CPU
* to do map or ghost_get. We use the kernel translate_fill_prop to translate the particles and update the properties. The only offload
* happens every 10 time-steps, to write to file.
*
* \snippet Vector/1_gpu_first_step/main.cu map_and_ghost_get_on_gpu
*
* ## RDMA on MPI with CUDA
*
* Modern MPI implementations are able to do RDMA on GPU memory. In practice this means that an Infiniband card can directly read
* GPU memory, transfer it over Infiniband and write it directly into GPU memory on the other node, without moving the data through system memory.
* It also means that MPI calls can work directly on CUDA device pointers. OpenFPM can exploit this feature if MPI is compiled
* with CUDA support. To check whether MPI is compiled with CUDA support use the function \b is_mpi_rdma_cuda_active() \b
*
* \snippet Vector/1_gpu_first_step/main.cu performance_rdma
*
* It is good to note that, in order for this to work (return true), some conditions must be met.
*
* * Because at the moment OpenFPM only detects the CUDA-aware implementation of OpenMPI, we must define the \b OPENMPI \b macro
* \snippet Vector/1_gpu_first_step/main.cu using_openmpi
*
* * MPI must be compiled with CUDA support (in general installing OpenFPM with -g should attempt to install OpenMPI with CUDA support)
*
* ## Full code ## {#code_e0_sim}
*
* \include Vector/1_gpu_first_step/main.cu
*
*/
#ifdef __NVCC__
//! \cond [using_openmpi] \endcond
#define OPENMPI
//! \cond [using_openmpi] \endcond
//#define SCAN_WITH_CUB <------ MODERNGPU is broken on RTX use CUB library for scan
//#define EXTERNAL_SET_GPU <----- In case you want to distribute the GPUs differently from the default
#include "Vector/vector_dist.hpp"
//! \cond [kernel_translate_fill_prop] \endcond
template<typename vector_type>
__global__ void translate_fill_prop(vector_type vd)
{
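// GET_PARTICLE (OpenFPM macro) maps the CUDA block/thread index to a particle index
// and skips threads that fall beyond the number of particles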
auto p = GET_PARTICLE(vd);
vd.template getProp<0>(p) = vd.getPos(p)[0] + vd.getPos(p)[1];
vd.template getProp<1>(p)[0] = vd.getPos(p)[0];
vd.template getProp<1>(p)[1] = vd.getPos(p)[1];
vd.template getProp<2>(p)[0][0] = vd.getPos(p)[0];
vd.template getProp<2>(p)[0][1] = vd.getPos(p)[1];
vd.template getProp<2>(p)[1][0] = vd.getPos(p)[0] + vd.getPos(p)[1];
vd.template getProp<2>(p)[1][1] = vd.getPos(p)[1] - vd.getPos(p)[0];
vd.getPos(p)[0] += 0.01f;
vd.getPos(p)[1] += 0.01f;
}
//! \cond [kernel_translate_fill_prop] \endcond
int main(int argc, char* argv[])
{
// OpenFPM GPU distribution
// OpenFPM by default selects GPU 0 for process 0, GPU 1 for process 1 and so on ... . In the multi-node case it is the same: each node
// has a group of processes, and these processes are distributed across the GPUs available on that node.
// If you want to override this behaviour use #define EXTERNAL_SET_GPU at the very beginning of the program and use
// cudaSetDevice to select the GPU for that particular process before openfpm_init
// Note: To get the process number do MPI_Init and use MPI_Comm_rank. VCluster is not available before openfpm_init
// A code snippet in case we want to skip GPU 0
// MPI_Init(&argc,&argv);
// int rank;
// MPI_Comm_rank(MPI_COMM_WORLD,&rank);
// cudaSetDevice(1+rank);
//! \cond [cpu_like_gpu] \endcond
// initialize the library
openfpm_init(&argc,&argv);
// Here we define our domain, a 2D box with intervals from 0 to 1.0 for x and y
Box<2,float> domain({0.0,0.0},{1.0,1.0});
// Here we define the boundary conditions of our problem
size_t bc[2]={PERIODIC,PERIODIC};
// extended boundary around the domain, and the processor domain
Ghost<2,float> g(0.05);
vector_dist_gpu<2,float, aggregate<float,float[2],float[2][2]> > vd(100,domain,bc,g);
// the scalar is the element at position 0 in the aggregate
const int scalar = 0;
// the vector is the element at position 1 in the aggregate
const int vector = 1;
// the tensor is the element at position 2 in the aggregate
const int tensor = 2;
auto it = vd.getDomainIterator();
while (it.isNext())
{
auto key = it.get();
// we define x, assign a random position between 0.0 and 1.0
vd.getPos(key)[0] = (float)rand() / RAND_MAX;
// we define y, assign a random position between 0.0 and 1.0
vd.getPos(key)[1] = (float)rand() / RAND_MAX;
// next particle
++it;
}
vd.map();
//! \cond [cpu_like_gpu] \endcond
//! \cond [offload_pos_prop] \endcond
vd.hostToDevicePos();
vd.template hostToDeviceProp<scalar,vector,tensor>();
//! \cond [offload_pos_prop] \endcond
//! \cond [launch_domain_it] \endcond
auto ite = vd.getDomainIteratorGPU();
translate_fill_prop<<<ite.wthr,ite.thr>>>(vd.toKernel());
//! \cond [launch_domain_it] \endcond
//! \cond [device_to_host_write] \endcond
vd.deviceToHostPos();
vd.deviceToHostProp<0,1,2>();
// We write on a file
vd.write("output");
//! \cond [device_to_host_write] \endcond
//! \cond [map_and_ghost_get_on_gpu] \endcond
for (int j = 0 ; j < 100 ; j++)
{
auto ite = vd.getDomainIteratorGPU();
translate_fill_prop<<<ite.wthr,ite.thr>>>(vd.toKernel());
vd.map(RUN_ON_DEVICE);
vd.template ghost_get<0,1,2>(RUN_ON_DEVICE);
if ( j % 10 == 0)
{
// offload to host
vd.deviceToHostPos();
vd.template deviceToHostProp<0,1,2>();
// write
vd.write_frame("output_f",j);
}
}
//! \cond [map_and_ghost_get_on_gpu] \endcond
//! \cond [performance_rdma] \endcond
bool active = is_mpi_rdma_cuda_active();
std::cout << "Is MPI rdma active on CUDA " << active << std::endl;
//! \cond [performance_rdma] \endcond
openfpm_finalize();
}
#else
int main(int argc, char* argv[])
{
return 0;
}
#endif
|
571878989e95495982c168bd62a2739858942750.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
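// Transposed-convolution output size:
//   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding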
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
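// In row-major terms this computes: columns (m x n) = weight^T (m x k) * input_n (k x n)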
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
't',
n,
m,
k,
1,
input_n.data<scalar_t>(),
n,
weight.data<scalar_t>(),
m,
0,
columns.data<scalar_t>(),
n);
// Unpack columns back into the output:
col2im<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n_,
m_,
k_,
1,
ones.data<scalar_t>(),
k_,
bias.data<scalar_t>(),
k_,
1,
output_n.data<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
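// In row-major terms this computes: grad_input_n (m x n) = weight (m x k) * grad_columns (k x n)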
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
'n',
n,
m,
k,
1,
grad_columns.data<scalar_t>(),
n,
weight.data<scalar_t>(),
k,
0,
grad_input_n.data<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n,
m,
k,
scale,
columns.data<scalar_t>(),
k,
input_n.data<scalar_t>(),
k,
1,
grad_weight.data<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
k_,
m_,
scale,
grad_output_n.data<scalar_t>(),
k_,
ones.data<scalar_t>(),
1,
1,
grad_bias.data<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input);
Tensor ones = at::empty_like(input);
conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input);
Tensor columns = at::empty_like(input);
Tensor ones = at::empty_like(input);
conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
571878989e95495982c168bd62a2739858942750.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
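// Transposed-convolution output size:
//   out = (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding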
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
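// In row-major terms this computes: columns (m x n) = weight^T (m x k) * input_n (k x n)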
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
't',
n,
m,
k,
1,
input_n.data<scalar_t>(),
n,
weight.data<scalar_t>(),
m,
0,
columns.data<scalar_t>(),
n);
// Unpack columns back into the output:
col2im<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n_,
m_,
k_,
1,
ones.data<scalar_t>(),
k_,
bias.data<scalar_t>(),
k_,
1,
output_n.data<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
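// In row-major terms this computes: grad_input_n (m x n) = weight (m x k) * grad_columns (k x n)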
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
'n',
n,
m,
k,
1,
grad_columns.data<scalar_t>(),
n,
weight.data<scalar_t>(),
k,
0,
grad_input_n.data<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
// Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
// Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n,
m,
k,
scale,
columns.data<scalar_t>(),
k,
input_n.data<scalar_t>(),
k,
1,
grad_weight.data<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
k_,
m_,
scale,
grad_output_n.data<scalar_t>(),
k_,
ones.data<scalar_t>(),
1,
1,
grad_bias.data<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input);
Tensor ones = at::empty_like(input);
conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input);
Tensor columns = at::empty_like(input);
Tensor ones = at::empty_like(input);
conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
66e6e1889a54e7c5bfc584d0f61ef30753004be6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h> // cuda sample vectorAdd uses this ?? why ?
#include <hip/hip_runtime.h>
#include <cudaAdd.cuh>
//#include <cudaAdd.h>
extern "C"
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
int ii = blockDim.x * blockIdx.x + threadIdx.x;
if (ii < n)
c[ii] = a[ii] + b[ii];
}
void vectorAddition(const float* a, const float* b, float* c, int n) {
float *a_cuda, *b_cuda, *c_cuda;
unsigned int nBytes = sizeof(float) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
// allocate and copy memory into the device
hipMalloc((void **)& a_cuda, nBytes);
hipMalloc((void **)& b_cuda, nBytes);
hipMalloc((void **)& c_cuda, nBytes);
hipMemcpy(a_cuda, a, nBytes, hipMemcpyHostToDevice);
hipMemcpy(b_cuda, b, nBytes, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( vectorAdditionCUDA), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, a_cuda, b_cuda, c_cuda, n);
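// note: the synchronous hipMemcpy below is issued on the default stream, so it waits for the kernel to finish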
// load the answer back into the host
hipMemcpy(c, c_cuda, nBytes, hipMemcpyDeviceToHost);
hipFree(a_cuda);
hipFree(b_cuda);
hipFree(c_cuda);
}
|
66e6e1889a54e7c5bfc584d0f61ef30753004be6.cu
|
#include <cuda_runtime.h> // cuda sample vectorAdd uses this ?? why ?
#include <cuda.h>
#include <cudaAdd.cuh>
//#include <cudaAdd.h>
extern "C"
__global__ void vectorAdditionCUDA(const float* a, const float* b, float* c, int n)
{
int ii = blockDim.x * blockIdx.x + threadIdx.x;
if (ii < n)
c[ii] = a[ii] + b[ii];
}
void vectorAddition(const float* a, const float* b, float* c, int n) {
float *a_cuda, *b_cuda, *c_cuda;
unsigned int nBytes = sizeof(float) * n;
int threadsPerBlock = 256;
int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
// allocate and copy memory into the device
cudaMalloc((void **)& a_cuda, nBytes);
cudaMalloc((void **)& b_cuda, nBytes);
cudaMalloc((void **)& c_cuda, nBytes);
cudaMemcpy(a_cuda, a, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(b_cuda, b, nBytes, cudaMemcpyHostToDevice);
vectorAdditionCUDA<<<blocksPerGrid, threadsPerBlock>>>(a_cuda, b_cuda, c_cuda, n);
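// note: the synchronous cudaMemcpy below is issued on the default stream, so it waits for the kernel to finish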
// load the answer back into the host
cudaMemcpy(c, c_cuda, nBytes, cudaMemcpyDeviceToHost);
cudaFree(a_cuda);
cudaFree(b_cuda);
cudaFree(c_cuda);
}
|
d87883b2e56c33cf7fde7a89de034c5cf6784b0a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//.cu code
#include "test.h"
// ...
__global__ void funcKernel(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows)
{
int r = blockIdx.y*blockDim.y+threadIdx.y;
int c = blockIdx.x*blockDim.x+threadIdx.x;
if(r >= rows || c >= cols)
return;
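// srcstep and dststep are row pitches in bytes, hence the char* arithmetic below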
const float* rowsrcptr= (const float *)(((char *)srcptr)+r*srcstep);
float* rowdstPtr= (float *) (((char *)dstptr)+r*dststep);
float val = rowsrcptr[c];
if((int) val % 90 == 0)
{
rowdstPtr[c] = -1 ;
}
else
{
float acos_val = acos(val);
rowdstPtr[c] = acos_val;
}
}
int divUp(int a, int b)
{
return (a+b-1)/b;
}
void func(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows)
{
dim3 blDim(32,8);
dim3 grDim(divUp(cols, blDim.x), divUp(rows,blDim.y));
std::cout << "calling kernel from func\n";
hipLaunchKernelGGL(( funcKernel), dim3(grDim),dim3(blDim), 0, 0, srcptr,dstptr,srcstep,dststep,cols,rows);
std::cout << "done with kernel call\n";
hipDeviceSynchronize();
}
|
d87883b2e56c33cf7fde7a89de034c5cf6784b0a.cu
|
//.cu code
#include "test.h"
// ...
__global__ void funcKernel(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows)
{
int r = blockIdx.y*blockDim.y+threadIdx.y;
int c = blockIdx.x*blockDim.x+threadIdx.x;
if(r >= rows || c >= cols)
return;
const float* rowsrcptr = (const float *)(((const char *)srcptr) + r*srcstep);
float* rowdstPtr = (float *)(((char *)dstptr) + r*dststep);
float val = rowsrcptr[c];
if((int) val % 90 == 0)
{
rowdstPtr[c] = -1 ;
}
else
{
float acos_val = acos(val);
rowdstPtr[c] = acos_val;
}
}
int divUp(int a, int b)
{
return (a+b-1)/b;
}
void func(const float* srcptr, float* dstptr, size_t srcstep, const size_t dststep, int cols, int rows)
{
dim3 blDim(32,8);
dim3 grDim(divUp(cols, blDim.x), divUp(rows,blDim.y));
std::cout << "calling kernel from func\n";
funcKernel<<<grDim,blDim>>>(srcptr,dstptr,srcstep,dststep,cols,rows);
std::cout << "done with kernel call\n";
cudaDeviceSynchronize();
}
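/* A minimal host-side sketch, assuming the step arguments are byte pitches, as the
   row-pointer arithmetic in funcKernel implies. cudaMallocPitch is one way to obtain
   such pitched buffers; the guard macro and runFuncDemo() are illustrative and not
   part of the original code. */
#ifdef FUNC_PITCH_DEMO
#include <cuda_runtime.h>
int runFuncDemo(int cols, int rows)
{
    float *d_src = 0, *d_dst = 0;
    size_t srcStep = 0, dstStep = 0;
    cudaMallocPitch((void **)&d_src, &srcStep, cols * sizeof(float), rows); // pitched source rows
    cudaMallocPitch((void **)&d_dst, &dstStep, cols * sizeof(float), rows); // pitched destination rows
    cudaMemset2D(d_src, srcStep, 0, cols * sizeof(float), rows);            // dummy input values
    func(d_src, d_dst, srcStep, dstStep, cols, rows);                       // wrapper defined above
    cudaFree(d_src);
    cudaFree(d_dst);
    return 0;
}
#endif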
|
fab55cdaf17c3e72a5c1e4bdf4a9200f10c6e621.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*****************************************************************************************
mkparlist.c
Takes mod_t and dat_t files and makes a list of pointers to the free parameters.
Modified 2016 Dec 20 by ME:
Adapted for use with CUDA. This version of mkparlist is used exclusively
for structures residing in device (GPU) memory.
Modified 2014 Aug 22 by SN:
Add spin.lib_amp, spin.lib_freq, spin.lib_phase for taking into account librations.
Modified 2014 February 12 by CM:
Implement multiple radar and optical scattering laws
Modified 2013 May 20 by CM:
Implement ovoid shape components
Modified 2012 March 23 by CM:
Add the "dopscale" parameter for delay-Doppler and Doppler datasets
Modified 2012 March 14 by CM:
Have separate code blocks for delay-Doppler vs. Doppler datasets when handling the
delay correction polynomial coefficients
Modified 2011 September 2 by CM:
Add the "harmlambert" and "inholambert" optical scattering laws
Modified 2011 August 7 by CM:
Add spin impulses
Modified 2010 June 1 by CM:
Change "scalefactor" parameter from a scalar to a 3-component vector
for harmonic and vertex shape structures
Modified 2010 April 27 by CM:
Add "tabular" radar scattering law
Modified 2008 August 10 by CM:
Change "delcor_step" and "delcor_abstol" parameters to be vectors
rather than scalars
Change parameter type for each component's linear offsets in the
mod file from SHAPEPAR to SIZEPAR
Modified 2006 October 1 by CM:
Replace ellipsoid diameters D with two_a, a_over_b, and b_over_c
Add "scalefactor" to harmonic and vertex shape structures
Add "ratio_step" "ratio_tol" and "ratio_abstol" fit parameters
Add SIZEPAR parameters
Modified 2006 March 6 by PT:
Add "spin.omegadot" parameter for changing spin rate
Modified 2005 September 7 by CM:
Add "harmlommel" "harmhapke" and "harmkaas" optical scattering laws
Add "harmcosine" radar scattering law
Modified 2005 August 1 by CM:
Add "inhokaas" optical scattering law
Modified 2005 July 20 by CM:
Add "hagfors" and "cosine_qs" and "gauss+cosine" and "hagfors+cosine"
and "cosine+cosine" and inhomogeneous "inhocosine" radar
scattering laws
Eliminate "flat" radar scattering law
Modified 2005 July 4 by CM:
Add inhomogeneous "inholommel" and "inhohapke" optical scattering laws
Modified 2005 February 24 by CM:
Add POS case for data parameters so that the horizontal and vertical
offsets for plane-of-sky datasets can be fitted
Modified 2004 August 13 by CM:
Assign "fparabstol" pointers to handle absolute fitting tolerances
Modified 2004 April 29 by CM:
For Kaasalainen scattering law, switch from weighting factor "c"
(ranging from 0 to infinity) to "wt" (ranging from 0 to 1)
Modified 2004 March 13 by CM:
Add code to handle harmonic components
Modified 2004 February 25 by CM:
Add Kaasalainen "Lambert + Lommel-Seeliger" scattering law parameters
Modified 2003 April 3 by CM:
1) Fit ellipsoid diameters using length_step and length_tol
rather than angle_step and angle_tol
2) Fit photometric parameters using photo_step and photo_tol
rather than length_step and length_tol
3) Fit moments of inertia using inertia_step and inertia_tol
rather than spin_step and spin_tol
4) Fit delay correction polynomial coefficients using
delcor_step and delcor_tol rather than spin_step and spin_tol
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
__device__ int p;
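/* p indexes a single shared list of free parameters: each kernel below appends an
 * entry by storing the parameter's address in fpntr[++p] and its step size,
 * tolerances, and type in the matching slots of the other arrays */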
__global__ void mpl_comp_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
p = -1;
int i = 0, j, k; /* component index - always zero */
for (j=0; j<=2; j++) { /* check linear offsets */
if (dmod->shape.comp[i].off[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].off[j].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
}
for (j=0; j<=2; j++) { /* check angular offsets */
if (dmod->shape.comp[i].rot[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].rot[j].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SHAPEPAR;
}
}
switch (dmod->shape.comp[i].type) {
case ELLIPSE:
if (dmod->shape.comp[i].desc.ell.two_a.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.two_a.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
if (dmod->shape.comp[i].desc.ell.a_over_b.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.a_over_b.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ell.b_over_c.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.b_over_c.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
break;
case OVOID:
if (dmod->shape.comp[i].desc.ovoid.two_a.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.two_a.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.a_over_b.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.a_over_b.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.b_over_c.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.b_over_c.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.k.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.k.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
break;
case HARMONIC:
for (j=0; j<=2; j++)
if (dmod->shape.comp[i].desc.har.scalefactor[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.scalefactor[j].val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SIZEPAR;
}
for (j=0; j<=dmod->shape.comp[i].desc.har.nhar; j++) {
if (dmod->shape.comp[i].desc.har.a[j][0].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.a[j][0].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
for (k=1; k<=j; k++) {
if (dmod->shape.comp[i].desc.har.a[j][k].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.a[j][k].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.har.b[j][k].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.b[j][k].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
}
}
break;
case VERTEX:
for (j=0; j<=2; j++)
if (dmod->shape.comp[i].desc.ver.scalefactor[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ver.scalefactor[j].val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SIZEPAR;
}
for (j=0; j<dmod->shape.comp[i].desc.ver.nv; j++) {
if (dmod->shape.comp[i].desc.ver.v[j].r.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ver.v[j].r.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
}
break;
default:
printf("mkparlist_cuda.cu: can't do that type of model yet\n");
}
}
}
__global__ void mpl_rad_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int ilaw, i, c, f, m, l;
for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++) {
switch (dmod->photo.radtype[ilaw]) {
case COSINELAW_DIFF:
if (dmod->photo.radar[ilaw].RC.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].RC.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].RC.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].RC.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case TABULARLAW:
for (i=0; i<dmod->photo.radar[ilaw].tabular.n; i++) {
if (dmod->photo.radar[ilaw].tabular.rho[i].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].tabular.rho[i].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case HARMCOSINE_DIFF:
for (l=0; l<=dmod->photo.radar[ilaw].harmcosine.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.radar[ilaw].harmcosine.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.radar[ilaw].harmcosine.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.radar[ilaw].harmcosine.C.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.radar[ilaw].harmcosine.C.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.C.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.radar[ilaw].harmcosine.C.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.C.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case GAUSSIANLAW :
case HAGFORSLAW :
case COSINELAW_QS:
if (dmod->photo.radar[ilaw].quasispec.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].quasispec.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].quasispec.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].quasispec.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case GAUSSIAN_COSINE:
case HAGFORS_COSINE :
case COSINE_COSINE :
if (dmod->photo.radar[ilaw].hybrid.qs.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.qs.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.qs.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.qs.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.diff.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.diff.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.diff.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.diff.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case INHOCOSINE_DIFF:
if (dmod->photo.radar[ilaw].inhocosine.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].inhocosine.global.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.global.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.radar[ilaw].inhocosine.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].inhocosine.local[c][f].C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.local[c][f].C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case NOLAW:
break;
default:
printf("mkparlist_gpu.cu: can't do that radar law yet\n");
}
}
}
}
__global__ void mpl_photo_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int ilaw, c, f, l, m;
for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++) {
switch (dmod->photo.opttype[ilaw]) {
case NOLAW:
break;
case GEOMETRICAL:
case LAMBERTLAW:
case LOMMEL:
if (dmod->photo.optical[ilaw].R.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].R.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMLAMBERT:
case HARMLOMMEL:
for (l=0; l<=dmod->photo.optical[ilaw].harmR.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmR.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmR.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmR.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmR.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOLAMBERT:
case INHOLOMMEL:
if (dmod->photo.optical[ilaw].inhoR.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhoR.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++)
if (dmod->photo.optical[ilaw].inhoR.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhoR.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HAPKE:
if (dmod->photo.optical[ilaw].hapke.w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMHAPKE:
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.w.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.w.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.w.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.w.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.w.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.h.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.h.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.h.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.h.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.h.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.B0.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.B0.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.B0.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.B0.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.B0.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.g.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.g.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.g.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.g.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.g.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.theta.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.theta.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.theta.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.theta.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.theta.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOHAPKE:
if (dmod->photo.optical[ilaw].inhohapke.global.w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case KAASALAINEN:
if (dmod->photo.optical[ilaw].kaas.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMKAAS:
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.wt.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.wt.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.wt.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.wt.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.wt.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.A0.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.A0.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.A0.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.A0.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.A0.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.D.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.D.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.D.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.D.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.D.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.k.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.k.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.k.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.k.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.k.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOKAAS:
if (dmod->photo.optical[ilaw].inhokaas.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
default:
printf("mkparlist_cuda.c: can't do that optical law yet\n");
}
}
}
}
__global__ void mpl_spin_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int i, k;
for (i=0; i<=2; i++) {
if (dmod->spin.angle[i].state == 'f') {
fpntr[++p] = &dmod->spin.angle[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.omega[i].state == 'f') {
fpntr[++p] = &dmod->spin.omega[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.inertia[i].state == 'f') {
fpntr[++p] = &dmod->spin.inertia[i].val;
fparstep[p] = dpar->inertia_step;
fpartol[p] = dpar->inertia_tol;
fparabstol[p] = dpar->inertia_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.omegadot[i].state == 'f') {
fpntr[++p] = &dmod->spin.omegadot[i].val;
fparstep[p] = dpar->spindot_step;
fpartol[p] = dpar->spindot_tol;
fparabstol[p] = dpar->spindot_abstol;
fpartype[p] = SPINPAR;
}
}
for (k=0; k<dmod->spin.n_impulse; k++)
for (i=0; i<=2; i++) {
if (dmod->spin.impulse[k][i].state == 'f') {
fpntr[++p] = &dmod->spin.impulse[k][i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
if (dmod->spin.lib_amp.state == 'f') {
fpntr[++p] = &dmod->spin.lib_amp.val;
fparstep[p] = dpar->lib_amp_step;
fpartol[p] = dpar->lib_amp_tol;
fparabstol[p] = dpar->lib_amp_abstol;
fpartype[p] = SPINPAR;
}
if (dmod->spin.lib_freq.state == 'f') {
fpntr[++p] = &dmod->spin.lib_freq.val;
fparstep[p] = dpar->lib_freq_step;
fpartol[p] = dpar->lib_freq_tol;
fparabstol[p] = dpar->lib_freq_abstol;
fpartype[p] = SPINPAR;
}
if (dmod->spin.lib_phase.state == 'f') {
fpntr[++p] = &dmod->spin.lib_phase.val;
fparstep[p] = dpar->lib_phase_step;
fpartol[p] = dpar->lib_phase_tol;
fparabstol[p] = dpar->lib_phase_abstol;
fpartype[p] = SPINPAR;
}
}
}
__global__ void mpl_dat_krnl(struct par_t *dpar, struct dat_t *ddat,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype, int nsets) {
/* Serial kernel: a single thread loops over all nsets data sets (the shared counter p is advanced with ++p, so a multi-threaded launch would race) */
//int s = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
for (int s=0; s<nsets; s++) {
for (i=0; i<=2; i++) {
if (ddat->set[s].angleoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].angleoff[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (ddat->set[s].omegaoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].omegaoff[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
switch (ddat->set[s].type) {
case DELAY:
for (i=0; i<=ddat->set[s].desc.deldop.delcor.n; i++) {
if (ddat->set[s].desc.deldop.delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.deldop.delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.deldop.dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
break;
case DOPPLER:
for (i=0; i<=ddat->set[s].desc.doppler.delcor.n; i++) {
if (ddat->set[s].desc.doppler.delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.doppler.delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.doppler.dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.doppler.dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
break;
case POS:
for (i=0; i<ddat->set[s].desc.poset.nframes; i++) {
for (j=0; j<=1; j++) {
if (ddat->set[s].desc.poset.frame[i].off[j].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.poset.frame[i].off[j].val;
fparstep[p] = dpar->xyoff_step;
fpartol[p] = dpar->xyoff_tol;
fparabstol[p] = dpar->xyoff_abstol;
fpartype[p] = XYOFFPAR;
}
}
}
break;
case LGHTCRV:
break;
default:
printf("mkparlist_cuda.cu: can't handle that type of data yet\n");
}
}
}
__global__ void mpl_dat_MFS_krnl(struct par_t *dpar, struct dat_t *ddat,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype, int nsets) {
/* Serial kernel: a single thread loops over all nsets data sets, one delay-Doppler frame at a time */
//int s = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
for (int s=0; s<nsets; s++) {
for (int f=0; f<ddat->set[s].desc.deldop.nframes; f++) {
for (i=0; i<=2; i++) {
if (ddat->set[s].desc.deldop.frame[f].angleoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].angleoff[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (ddat->set[s].desc.deldop.frame[f].omegaoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].omegaoff[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=ddat->set[s].desc.deldop.frame[f].delcor.n; i++) {
if (ddat->set[s].desc.deldop.frame[f].delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.deldop.frame[f].dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
}
}
}
__host__ void mkparlist_gpu(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, double *fparstep, double *fpartol,
double *fparabstol, int *fpartype, double **fpntr,
int nfpar, int nsets)
{
dim3 BLK,THD;
THD.x = maxThreadsPerBlock;
gpuErrchk(hipSetDevice(GPU0));
/* Shape parameters - single component only */
//for (i=0; i<dmod->shape.ncomp; i++) { /* read each component */
/* Launch first parameter kernel */
hipLaunchKernelGGL(( mpl_comp_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_comp_krnl (mkparlist_cuda.cu)");
/* Photometric parameters - only one radlaw at a time for now */
//for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++) {
/* Launch photometric kernel */
hipLaunchKernelGGL(( mpl_rad_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_rad_krnl (mkparlist_cuda.cu)");
/* Photometric parameters - only one optlaw at a time */
//for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++) {
hipLaunchKernelGGL(( mpl_photo_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_photo_krnl (mkparlist_cuda.cu)");
/* Spin parameters */
hipLaunchKernelGGL(( mpl_spin_krnl), dim3(1),dim3(1), 0, 0, dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_spin_krnl (mkparlist_cuda.cu)");
/* Data parameters (i.e., those in the obs file, other than the "calfact"
* parameters which are computed analytically)
* mpl_dat_krnl walks all nsets data sets serially, so it is launched below
* with a single thread; BLK is computed here but left unused by that launch */
BLK.x = floor((THD.x - 1 + nsets)/THD.x);
hipLaunchKernelGGL(( mpl_dat_krnl), dim3(1),dim3(1), 0, 0, dpar, ddat, fpntr, fparstep, fpartol,
fparabstol, fpartype, nsets);
checkErrorAfterKernelLaunch("mpl_dat_krnl (mkparlist_cuda.cu)");
}
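/* A caller-side sketch, assuming the parameter lists must be visible to both host
 * and device (the kernels above fill them and the fitting code reads them back).
 * hipMallocManaged is one allocation choice that satisfies this; the guard macro,
 * mkparlist_alloc_demo(), and the use of nfpar as the free-parameter count are
 * illustrative assumptions rather than part of the original code. */
#ifdef MKPARLIST_ALLOC_DEMO
__host__ void mkparlist_alloc_demo(struct par_t *dpar, struct mod_t *dmod,
		struct dat_t *ddat, int nfpar, int nsets)
{
	double **fpntr, *fparstep, *fpartol, *fparabstol;
	int *fpartype;
	/* Managed allocations, readable and writable from both host and device */
	gpuErrchk(hipMallocManaged((void **)&fpntr, sizeof(double *) * nfpar));
	gpuErrchk(hipMallocManaged((void **)&fparstep, sizeof(double) * nfpar));
	gpuErrchk(hipMallocManaged((void **)&fpartol, sizeof(double) * nfpar));
	gpuErrchk(hipMallocManaged((void **)&fparabstol, sizeof(double) * nfpar));
	gpuErrchk(hipMallocManaged((void **)&fpartype, sizeof(int) * nfpar));
	mkparlist_gpu(dpar, dmod, ddat, fparstep, fpartol, fparabstol,
			fpartype, fpntr, nfpar, nsets);
	/* Ownership of the filled arrays passes to the fitting code; nothing is freed here */
}
#endif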
|
fab55cdaf17c3e72a5c1e4bdf4a9200f10c6e621.cu
|
/*****************************************************************************************
mkparlist.c
Takes mod_t and dat_t files and makes a list of pointers to the free parameters.
Modified 2016 Dec 20 by ME:
Adapted for use with CUDA. This version of mkparlist is used exclusively
for structures residing in device (GPU) memory.
Modified 2014 Aug 22 by SN:
Add spin.lib_amp, spin.lib_freq, spin.lib_phase for taking into account librations.
Modified 2014 February 12 by CM:
Implement multiple radar and optical scattering laws
Modified 2013 May 20 by CM:
Implement ovoid shape components
Modified 2012 March 23 by CM:
Add the "dopscale" parameter for delay-Doppler and Doppler datasets
Modified 2012 March 14 by CM:
Have separate code blocks for delay-Doppler vs. Doppler datasets when handling the
delay correction polynomial coefficients
Modified 2011 September 2 by CM:
Add the "harmlambert" and "inholambert" optical scattering laws
Modified 2011 August 7 by CM:
Add spin impulses
Modified 2010 June 1 by CM:
Change "scalefactor" parameter from a scalar to a 3-component vector
for harmonic and vertex shape structures
Modified 2010 April 27 by CM:
Add "tabular" radar scattering law
Modified 2008 August 10 by CM:
Change "delcor_step" and "delcor_abstol" parameters to be vectors
rather than scalars
Change parameter type for each component's linear offsets in the
mod file from SHAPEPAR to SIZEPAR
Modified 2006 October 1 by CM:
Replace ellipsoid diameters D with two_a, a_over_b, and b_over_c
Add "scalefactor" to harmonic and vertex shape structures
Add "ratio_step" "ratio_tol" and "ratio_abstol" fit parameters
Add SIZEPAR parameters
Modified 2006 March 6 by PT:
Add "spin.omegadot" parameter for changing spin rate
Modified 2005 September 7 by CM:
Add "harmlommel" "harmhapke" and "harmkaas" optical scattering laws
Add "harmcosine" radar scattering law
Modified 2005 August 1 by CM:
Add "inhokaas" optical scattering law
Modified 2005 July 20 by CM:
Add "hagfors" and "cosine_qs" and "gauss+cosine" and "hagfors+cosine"
and "cosine+cosine" and inhomogeneous "inhocosine" radar
scattering laws
Eliminate "flat" radar scattering law
Modified 2005 July 4 by CM:
Add inhomogeneous "inholommel" and "inhohapke" optical scattering laws
Modified 2005 February 24 by CM:
Add POS case for data parameters so that the horizontal and vertical
offsets for plane-of-sky datasets can be fitted
Modified 2004 August 13 by CM:
Assign "fparabstol" pointers to handle absolute fitting tolerances
Modified 2004 April 29 by CM:
For Kaasalainen scattering law, switch from weighting factor "c"
(ranging from 0 to infinity) to "wt" (ranging from 0 to 1)
Modified 2004 March 13 by CM:
Add code to handle harmonic components
Modified 2004 February 25 by CM:
Add Kaasalainen "Lambert + Lommel-Seeliger" scattering law parameters
Modified 2003 April 3 by CM:
1) Fit ellipsoid diameters using length_step and length_tol
rather than angle_step and angle_tol
2) Fit photometric parameters using photo_step and photo_tol
rather than length_step and length_tol
3) Fit moments of inertia using inertia_step and inertia_tol
rather than spin_step and spin_tol
4) Fit delay correction polynomial coefficients using
delcor_step and delcor_tol rather than spin_step and spin_tol
*****************************************************************************************/
extern "C" {
#include "../shape/head.h"
}
__device__ int p;
__global__ void mpl_comp_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
p = -1;
int i = 0, j, k; /* component index - always zero */
for (j=0; j<=2; j++) { /* check linear offsets */
if (dmod->shape.comp[i].off[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].off[j].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
}
for (j=0; j<=2; j++) { /* check angular offsets */
if (dmod->shape.comp[i].rot[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].rot[j].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SHAPEPAR;
}
}
switch (dmod->shape.comp[i].type) {
case ELLIPSE:
if (dmod->shape.comp[i].desc.ell.two_a.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.two_a.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
if (dmod->shape.comp[i].desc.ell.a_over_b.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.a_over_b.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ell.b_over_c.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ell.b_over_c.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
break;
case OVOID:
if (dmod->shape.comp[i].desc.ovoid.two_a.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.two_a.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SIZEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.a_over_b.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.a_over_b.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.b_over_c.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.b_over_c.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.ovoid.k.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ovoid.k.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SHAPEPAR;
}
break;
case HARMONIC:
for (j=0; j<=2; j++)
if (dmod->shape.comp[i].desc.har.scalefactor[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.scalefactor[j].val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SIZEPAR;
}
for (j=0; j<=dmod->shape.comp[i].desc.har.nhar; j++) {
if (dmod->shape.comp[i].desc.har.a[j][0].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.a[j][0].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
for (k=1; k<=j; k++) {
if (dmod->shape.comp[i].desc.har.a[j][k].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.a[j][k].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
if (dmod->shape.comp[i].desc.har.b[j][k].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.har.b[j][k].val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
}
}
break;
case VERTEX:
for (j=0; j<=2; j++)
if (dmod->shape.comp[i].desc.ver.scalefactor[j].state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ver.scalefactor[j].val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = SIZEPAR;
}
for (j=0; j<dmod->shape.comp[i].desc.ver.nv; j++) {
if (dmod->shape.comp[i].desc.ver.v[j].r.state == 'f') {
fpntr[++p] = &dmod->shape.comp[i].desc.ver.v[j].r.val;
fparstep[p] = dpar->length_step;
fpartol[p] = dpar->length_tol;
fparabstol[p] = dpar->length_abstol;
fpartype[p] = SHAPEPAR;
}
}
break;
default:
printf("mkparlist_cuda.cu: can't do that type of model yet\n");
}
}
}
__global__ void mpl_rad_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int ilaw, i, c, f, m, l;
for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++) {
switch (dmod->photo.radtype[ilaw]) {
case COSINELAW_DIFF:
if (dmod->photo.radar[ilaw].RC.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].RC.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].RC.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].RC.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case TABULARLAW:
for (i=0; i<dmod->photo.radar[ilaw].tabular.n; i++) {
if (dmod->photo.radar[ilaw].tabular.rho[i].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].tabular.rho[i].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case HARMCOSINE_DIFF:
for (l=0; l<=dmod->photo.radar[ilaw].harmcosine.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.radar[ilaw].harmcosine.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.radar[ilaw].harmcosine.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.radar[ilaw].harmcosine.C.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.radar[ilaw].harmcosine.C.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.C.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.radar[ilaw].harmcosine.C.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].harmcosine.C.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case GAUSSIANLAW :
case HAGFORSLAW :
case COSINELAW_QS:
if (dmod->photo.radar[ilaw].quasispec.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].quasispec.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].quasispec.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].quasispec.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case GAUSSIAN_COSINE:
case HAGFORS_COSINE :
case COSINE_COSINE :
if (dmod->photo.radar[ilaw].hybrid.qs.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.qs.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.qs.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.qs.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.diff.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.diff.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].hybrid.diff.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].hybrid.diff.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case INHOCOSINE_DIFF:
if (dmod->photo.radar[ilaw].inhocosine.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].inhocosine.global.C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.global.C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.radar[ilaw].inhocosine.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.radar[ilaw].inhocosine.local[c][f].C.state == 'f') {
fpntr[++p] = &dmod->photo.radar[ilaw].inhocosine.local[c][f].C.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case NOLAW:
break;
default:
printf("mkparlist_gpu.cu: can't do that radar law yet\n");
}
}
}
}
__global__ void mpl_photo_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int ilaw, c, f, l, m;
for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++) {
switch (dmod->photo.opttype[ilaw]) {
case NOLAW:
break;
case GEOMETRICAL:
case LAMBERTLAW:
case LOMMEL:
if (dmod->photo.optical[ilaw].R.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].R.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMLAMBERT:
case HARMLOMMEL:
for (l=0; l<=dmod->photo.optical[ilaw].harmR.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmR.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmR.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmR.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmR.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOLAMBERT:
case INHOLOMMEL:
if (dmod->photo.optical[ilaw].inhoR.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhoR.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++)
if (dmod->photo.optical[ilaw].inhoR.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhoR.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HAPKE:
if (dmod->photo.optical[ilaw].hapke.w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].hapke.theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].hapke.theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMHAPKE:
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.w.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.w.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.w.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.w.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.w.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.h.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.h.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.h.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.h.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.h.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.B0.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.B0.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.B0.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.B0.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.B0.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.g.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.g.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.g.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.g.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.g.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmhapke.theta.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmhapke.theta.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.theta.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmhapke.theta.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmhapke.theta.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOHAPKE:
if (dmod->photo.optical[ilaw].inhohapke.global.w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.global.theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.global.theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].w.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].w.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].h.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].h.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].B0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].B0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].g.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].g.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhohapke.local[c][f].theta.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhohapke.local[c][f].theta.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case KAASALAINEN:
if (dmod->photo.optical[ilaw].kaas.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].kaas.k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].kaas.k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
break;
case HARMKAAS:
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.R.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.R.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.R.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.R.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.R.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.wt.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.wt.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.wt.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.wt.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.wt.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.A0.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.A0.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.A0.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.A0.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.A0.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.D.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.D.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.D.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.D.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.D.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
for (l=0; l<=dmod->photo.optical[ilaw].harmkaas.k.nhar; l++)
for (m=0; m<=l; m++) {
if (dmod->photo.optical[ilaw].harmkaas.k.a[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.k.a[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (m > 0 && dmod->photo.optical[ilaw].harmkaas.k.b[l][m].state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].harmkaas.k.b[l][m].val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
case INHOKAAS:
if (dmod->photo.optical[ilaw].inhokaas.global.R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.global.k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.global.k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
for (c=0; c<dmod->shape.ncomp; c++)
for (f=0; f<dmod->shape.comp[c].real.nf; f++) {
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].R.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].R.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].wt.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].wt.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].A0.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].A0.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].D.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].D.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
if (dmod->photo.optical[ilaw].inhokaas.local[c][f].k.state == 'f') {
fpntr[++p] = &dmod->photo.optical[ilaw].inhokaas.local[c][f].k.val;
fparstep[p] = dpar->photo_step;
fpartol[p] = dpar->photo_tol;
fparabstol[p] = dpar->photo_abstol;
fpartype[p] = PHOTOPAR;
}
}
break;
default:
printf("mkparlist_cuda.c: can't do that optical law yet\n");
}
}
}
}
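/* Collect the free ('f'-state) spin parameters -- the spin angles, angular
 * velocity components, principal moments of inertia, omegadot (spin-rate
 * change), spin impulses, and the libration amplitude/frequency/phase --
 * appending a value pointer, step size, tolerances, and the SPINPAR type
 * code for each one to the flat fit-parameter arrays. */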
__global__ void mpl_spin_krnl(struct par_t *dpar, struct mod_t *dmod,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype) {
/* Single-threaded kernel */
if (threadIdx.x == 0) {
int i, k;
for (i=0; i<=2; i++) {
if (dmod->spin.angle[i].state == 'f') {
fpntr[++p] = &dmod->spin.angle[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.omega[i].state == 'f') {
fpntr[++p] = &dmod->spin.omega[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.inertia[i].state == 'f') {
fpntr[++p] = &dmod->spin.inertia[i].val;
fparstep[p] = dpar->inertia_step;
fpartol[p] = dpar->inertia_tol;
fparabstol[p] = dpar->inertia_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (dmod->spin.omegadot[i].state == 'f') {
fpntr[++p] = &dmod->spin.omegadot[i].val;
fparstep[p] = dpar->spindot_step;
fpartol[p] = dpar->spindot_tol;
fparabstol[p] = dpar->spindot_abstol;
fpartype[p] = SPINPAR;
}
}
for (k=0; k<dmod->spin.n_impulse; k++)
for (i=0; i<=2; i++) {
if (dmod->spin.impulse[k][i].state == 'f') {
fpntr[++p] = &dmod->spin.impulse[k][i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
if (dmod->spin.lib_amp.state == 'f') {
fpntr[++p] = &dmod->spin.lib_amp.val;
fparstep[p] = dpar->lib_amp_step;
fpartol[p] = dpar->lib_amp_tol;
fparabstol[p] = dpar->lib_amp_abstol;
fpartype[p] = SPINPAR;
}
if (dmod->spin.lib_freq.state == 'f') {
fpntr[++p] = &dmod->spin.lib_freq.val;
fparstep[p] = dpar->lib_freq_step;
fpartol[p] = dpar->lib_freq_tol;
fparabstol[p] = dpar->lib_freq_abstol;
fpartype[p] = SPINPAR;
}
if (dmod->spin.lib_phase.state == 'f') {
fpntr[++p] = &dmod->spin.lib_phase.val;
fparstep[p] = dpar->lib_phase_step;
fpartol[p] = dpar->lib_phase_tol;
fparabstol[p] = dpar->lib_phase_abstol;
fpartype[p] = SPINPAR;
}
}
}
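/* Collect the free data-set parameters: per-set angle and spin offsets
 * (SPINPAR), the delay-correction polynomial coefficients (DELCORPAR) and
 * Doppler scaling factor (DOPSCALEPAR) of delay-Doppler and Doppler sets,
 * and the x/y offsets (XYOFFPAR) of plane-of-sky frames.  Lightcurve sets
 * contribute no parameters here. */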
__global__ void mpl_dat_krnl(struct par_t *dpar, struct dat_t *ddat,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype, int nsets) {
	/* Runs in a single thread (launched <<<1,1>>>) and loops serially over all nsets data sets */
int i, j;
for (int s=0; s<nsets; s++) {
for (i=0; i<=2; i++) {
if (ddat->set[s].angleoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].angleoff[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (ddat->set[s].omegaoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].omegaoff[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
switch (ddat->set[s].type) {
case DELAY:
for (i=0; i<=ddat->set[s].desc.deldop.delcor.n; i++) {
if (ddat->set[s].desc.deldop.delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.deldop.delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.deldop.dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
break;
case DOPPLER:
for (i=0; i<=ddat->set[s].desc.doppler.delcor.n; i++) {
if (ddat->set[s].desc.doppler.delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.doppler.delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.doppler.dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.doppler.dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
break;
case POS:
for (i=0; i<ddat->set[s].desc.poset.nframes; i++) {
for (j=0; j<=1; j++) {
if (ddat->set[s].desc.poset.frame[i].off[j].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.poset.frame[i].off[j].val;
fparstep[p] = dpar->xyoff_step;
fpartol[p] = dpar->xyoff_tol;
fparabstol[p] = dpar->xyoff_abstol;
fpartype[p] = XYOFFPAR;
}
}
}
break;
case LGHTCRV:
break;
default:
printf("mkparlist_cuda.cu: can't handle that type of data yet\n");
}
}
}
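/* MFS variant of the data-parameter kernel: the angle/spin offsets,
 * delay-correction coefficients, and Doppler scale are stored per
 * delay-Doppler frame rather than once per data set. */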
__global__ void mpl_dat_MFS_krnl(struct par_t *dpar, struct dat_t *ddat,
double **fpntr, double *fparstep, double *fpartol, double *fparabstol,
int *fpartype, int nsets) {
	/* Loops serially over all nsets data sets; no per-thread decomposition is used */
int i, j;
for (int s=0; s<nsets; s++) {
for (int f=0; f<ddat->set[s].desc.deldop.nframes; f++) {
for (i=0; i<=2; i++) {
if (ddat->set[s].desc.deldop.frame[f].angleoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].angleoff[i].val;
fparstep[p] = dpar->angle_step;
fpartol[p] = dpar->angle_tol;
fparabstol[p] = dpar->angle_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=2; i++) {
if (ddat->set[s].desc.deldop.frame[f].omegaoff[i].state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].omegaoff[i].val;
fparstep[p] = dpar->spin_step;
fpartol[p] = dpar->spin_tol;
fparabstol[p] = dpar->spin_abstol;
fpartype[p] = SPINPAR;
}
}
for (i=0; i<=ddat->set[s].desc.deldop.frame[f].delcor.n; i++) {
if (ddat->set[s].desc.deldop.frame[f].delcor.a[i].state == 'f') {
j = (i < MAXDELCORPAR) ? i : 0;
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].delcor.a[i].val;
fparstep[p] = dpar->delcor_step[j];
fpartol[p] = dpar->delcor_tol;
fparabstol[p] = dpar->delcor_abstol[j];
fpartype[p] = DELCORPAR;
}
}
if (ddat->set[s].desc.deldop.frame[f].dopscale.state == 'f') {
fpntr[++p] = &ddat->set[s].desc.deldop.frame[f].dopscale.val;
fparstep[p] = dpar->ratio_step;
fpartol[p] = dpar->ratio_tol;
fparabstol[p] = dpar->ratio_abstol;
fpartype[p] = DOPSCALEPAR;
}
}
}
}
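/* Host wrapper: builds the complete list of fittable parameters by launching
 * the shape, radar-law, optical-law, spin, and data-set kernels in sequence.
 * Every kernel is launched <<<1,1>>> so the entries of fpntr, fparstep,
 * fpartol, fparabstol, and fpartype are filled in a fixed order. */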
__host__ void mkparlist_gpu(struct par_t *dpar, struct mod_t *dmod,
struct dat_t *ddat, double *fparstep, double *fpartol,
double *fparabstol, int *fpartype, double **fpntr,
int nfpar, int nsets)
{
dim3 BLK,THD;
THD.x = maxThreadsPerBlock;
gpuErrchk(cudaSetDevice(GPU0));
/* Shape parameters - single component only */
//for (i=0; i<dmod->shape.ncomp; i++) { /* read each component */
/* Launch first parameter kernel */
mpl_comp_krnl<<<1,1>>>(dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_comp_krnl (mkparlist_cuda.cu)");
/* Photometric parameters - only one radlaw at a time for now */
//for (ilaw=0; ilaw<dmod->photo.nradlaws; ilaw++) {
/* Launch photometric kernel */
mpl_rad_krnl<<<1,1>>>(dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_rad_krnl (mkparlist_cuda.cu)");
/* Photometric parameters - only one optlaw at a time */
//for (ilaw=0; ilaw<dmod->photo.noptlaws; ilaw++) {
mpl_photo_krnl<<<1,1>>>(dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_photo_krnl (mkparlist_cuda.cu)");
/* Spin parameters */
mpl_spin_krnl<<<1,1>>>(dpar, dmod, fpntr, fparstep, fpartol,
fparabstol, fpartype);
checkErrorAfterKernelLaunch("mpl_spin_krnl (mkparlist_cuda.cu)");
	/* Data parameters (i.e., those in the obs file, other than the "calfact"
	 * parameters, which are computed analytically).  mpl_dat_krnl is launched
	 * single-threaded below and loops over the nsets data sets internally. */
	BLK.x = floor((THD.x - 1 + nsets)/THD.x);	/* currently unused */
mpl_dat_krnl<<<1,1>>>(dpar, ddat, fpntr, fparstep, fpartol,
fparabstol, fpartype, nsets);
checkErrorAfterKernelLaunch("mpl_dat_krnl (mkparlist_cuda.cu)");
}
|
919c08c09ef2d1b1e89d4281e74f37013ff21d5a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <set>
#include <iterator>
#include <algorithm>
#include <time.h>
using namespace std;
// Training image file name
const string training_image_fn = "train-images.idx3-ubyte";
// Training label file name
const string training_label_fn = "train-labels.idx1-ubyte";
int classes = 10;
int iteration = 5000;
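// Grid-stride kernel: for each pixel i and each of the 10 classes k, store the
// weighted input w[i + k*n]*x[i] + a into sum[i + k*n] (one n-length block per class).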
__global__
void saxpy(float n, float a, float *x, float *w, float *sum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("%d", index);
int classes = 10;
for (int i = index; i < n; i += stride)
for(int k = 0; k < classes; k++) {
sum[i + k * (int)n] = w[i + k * (int)n]*x[i] + a;
//sum[i + k * (int)n] = intermediateW[i + k * (int)n];
}
}
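// Grid-stride reduction over one class block of `sum`: each thread atomically
// adds its elements into total[0], which the host reads back as that class's score.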
__global__
void sum_cuda(float n, float *sum, float *total, int run)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("Index --- %d", index);
//int classes = 1;
	for (int idx = index; idx < n; idx += stride) {
		// Accumulate the weighted inputs of class `run` into total[0];
		// the old partial sum returned by atomicAdd is not needed here.
		atomicAdd(&total[0], sum[idx + run * (int)n]);
	}
//for (int idx = index; idx < classes; idx += stride) {
//printf("i = %d %f\n", i, sum[i]);
// for(int k = 0; k < n; k++) {
// //printf("i = %d %f\n",i, sum[i]);
// //sum[i] += w[i + k * (int)n];
// sum[i] += w[i*(int)n + k];
// //printf("%f\n",sum[i]);
// }
// register int i = atomicAdd(total, sum[idx]);
// sum[i] = idx;
//printf("cuda --- %f\n",sum[i]);
//}
}
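// Per-pixel, per-class weight update: a gradient-style step of size 0.001 in the
// direction err[k]*x[i], plus an L2 weight-decay term on w itself.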
__global__
void updateWeights(float n, float *err, float *w, float *x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int classes = 10;
//float a;
for (int i = index; i < n; i += stride)
for(int k = 0; k < classes; k++) {
//printf(" %f ", w[i + k * (int)n] );
//a = w[i + k * (int)n];
w[i + k * (int)n] -= 0.001 * ( ( -1 * err[k] * x[i] ) + w[i + k * (int)n] );
//printf(" %f after %f changes required %f\n", a, w[i + k * (int)n], err[k] );
}
//printf(" after changes required %f\n", err[0] );
//theta[m2][n] += (alpha * (labelTrain[j][m2] - prob[m2]) * dataTrain[j][n]);
}
void softMax(float *sum)
{
float total = 0.0f;
for (int i = 0; i < classes; i += 1)
total += exp(sum[i]);
for (int i = 0; i < classes; i += 1)
sum[i] = exp(sum[i]) / total;
}
// Software: Training Artificial Neural Network for MNIST database
// Author: Hy Truong Son
// Major: BSc. Computer Science
// Class: 2013 - 2016
// Institution: Eotvos Lorand University
// Email: [email protected]
// Website: http://people.inf.elte.hu/hytruongson/
// Copyright 2015 (c). All rights reserved.
// File stream to read data (image, label) and write down a report
ifstream image;
ifstream label;
ofstream report;
// Number of training samples
const int nTraining = 1;
// Image size in MNIST database
const int width = 28;
const int height = 28;
// Image. In MNIST: 28x28 gray scale images.
int d[width][height];
char inputNum;
void input() {
// Reading image
for(int i = 0; i < 1; i++ ) {
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
image.read(&inputNum, sizeof(char));
if (inputNum == 0) {
d[i][j] = 0;
} else {
d[i][j] = 1;
}
}
}
label.read(&inputNum, sizeof(char));
// cout << "Label:" << (int)inputNum << endl;
}
}
void check(float *sum, int N){
float total = 0.0f;
for(int j = 0; j < N; j++)
total += sum[j];
// cout<<total<< endl;
}
int main( int argc, char *argv[] )
{
float *x, *d_x, *d_w, *w, *sum, *d_sum;
//float total = 0, *d_total = 0;
float *d_index = 0;
float *h_index = 0;
float err[10], *d_err;
int N = width * height;
// cout << "Starting code....... 124" << endl;
x = (float *)malloc( N * sizeof(float));
w = (float *)malloc( N * classes * sizeof(float));
sum = (float *)malloc( N * classes * sizeof(float));
h_index = (float *)malloc( classes * sizeof(float));
//total = (float *)malloc( classes * sizeof(float) );
for(int i = 0; i < classes; i++)
h_index[i] = 0;
for(int conf = 1; conf <= 200; conf++) {
//iteration = conf * 100;
/*********** initializing weights *******************/
for (int i = 0; i < N; i++) {
for(int j = 0; j < classes; j++)
w[i + j * N] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
/*************Transfer Data from host to device *********************/
hipMalloc(&d_x, N * sizeof(float));
hipMalloc(&d_w, N * classes * sizeof(float));
hipMalloc(&d_sum, N * classes * sizeof(float));
//hipMalloc(&d_total, classes * sizeof(float));
hipMalloc(&d_err, classes * sizeof(float));
hipMalloc( (void**) &d_index, classes * sizeof(float) );
hipMemcpy(d_w, w, N * classes * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_sum, sum, N * classes * sizeof(float), hipMemcpyHostToDevice);
//hipMemcpy(d_total, total, sizeof(float), hipMemcpyHostToDevice);
/*************Transfer Data from host to device *********************/
/*********** initializing weights *******************/
/***************** *****************/
//hipMemcpy(w, d_w, N * classes * sizeof(float), hipMemcpyDeviceToHost);
// for(int k = 9; k < 10; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << w[ (j ) * height + (i ) + k * N];
// }
// cout << endl;
// }
// }
// cout << endl;
// cout << endl;
// cout << endl;
/***************** *****************/
/************************** *************************************************
*******************************
*******************************
******************************* LOAD AND UPDATE
*******************************
*******************************
* ********************* **************************************************/
image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file
// Reading file headers
char number;
for (int i = 1; i <= 16; ++i) {
image.read(&number, sizeof(char));
}
for (int i = 1; i <= 8; ++i) {
label.read(&number, sizeof(char));
}
// int blockSize = conf * 4;
// int numBlocks = (N + blockSize - 1) / blockSize;
// int numBlocks = conf * 4;
// cout << "blockSize" << blockSize << "numBlocks" << numBlocks << endl;
// cout << " calculations for reduction : " << conf << endl;
double difference = 0, diff_cpu = 0;
for(int l = 0; l < iteration; l++) {
// blockSize = conf * 4;
// numBlocks = (N + blockSize - 1) / blockSize;
/***************** Image Loading **********************/
for (int sample = 1; sample <= nTraining; ++sample) {
// cout << "Sample ---------- **************" << sample << endl;
input();
}
for (int i = 0; i < N; i++) {
x[i] = (float)d[i % width][i / width];
}
// cout << "Image:" << endl;
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << x[ (j ) * height + (i )];
// }
// cout << endl;
// }
int hostNum[10];
for(int j = 0; j < classes; j++)
hostNum[j] = 0;
hostNum[(int)inputNum] = 1;
// cout << "Label: ";
// for(int j = 0; j < classes; j++)
// cout << " " << hostNum[j];
// cout << endl;
/***************** Image Loading **********************/
/********* Multiplying ******************/
clock_t t, t1, t2;
// t = clock();
// t1 = clock();
hipMemcpy(d_x, x, N * sizeof(float), hipMemcpyHostToDevice);
// Compute the per-class weighted inputs over the N = width*height pixels
//int blockSize = 27;
int blockSize = N;
int numBlocks = (N + blockSize - 1) / blockSize;
// saxpy<<<1, 1>>>(N, 0.0f, d_x, d_w, d_sum);
hipLaunchKernelGGL(( saxpy), dim3(numBlocks), dim3(blockSize), 0, 0, N, 0.0f, d_x, d_w, d_sum);
/********* Multiplying ******************/
/***************** *****************/
// hipMemcpy(w, d_sum, N * classes * sizeof(float), hipMemcpyDeviceToHost);
// for(int k = 2; k < 3; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << w[ (j ) * height + (i ) + k * N];
// }
// cout << endl;
// }
// }
// cout << endl;
// cout << endl;
// cout << endl;
/***************** *****************/
/*********** Finding Softmax ************************/
//hipMemcpy(sum, d_sum, N*classes*sizeof(float), hipMemcpyDeviceToHost);
blockSize = 27 * 27;
numBlocks = (classes + blockSize - 1) / blockSize;
// blockSize = conf;
// numBlocks = (classes + blockSize - 1) / blockSize;
// numBlocks = 1;
int max_index = 0;
float total[10], summation = 0;
double time_taken = 0;// = ((double)t)/CLOCKS_PER_SEC; // in seconds
for(int k = 0; k < classes; ++k) {
h_index[0] = 0;
hipMemcpy(d_index, h_index , classes * sizeof(float), hipMemcpyHostToDevice);
t = clock();
hipLaunchKernelGGL(( sum_cuda), dim3(numBlocks), dim3(blockSize), 0, 0, N, d_sum, d_index, k);
t = clock() - t;
time_taken += ((double)t)/CLOCKS_PER_SEC;
hipMemcpy(h_index, d_index, classes * sizeof(float), hipMemcpyDeviceToHost);
total[k] = h_index[0];
// cout << total[k] << endl;
// check( sum + k * N, N);
//summation += total[k];
max_index = total[k] > total[max_index] ? k : max_index;
}
time_taken /= classes;
t1 = clock();
hipMemcpy(sum, d_sum, N*classes*sizeof(float), hipMemcpyDeviceToHost);
check(sum,N);
t2 = clock();
double time_taken_by_cpu = ((double) (t2 - t1) )/CLOCKS_PER_SEC; // in seconds
for(int k = 0; k < classes; ++k) {
//total[k] = total[k] / summation;
// cout << total[k] << endl;
total[k] = total[k] / total[max_index];
summation += total[k];
//max_index = total[k] > total[max_index] ? k : max_index;
}
for(int k = 0; k < classes; ++k) {
total[k] = total[k] / summation;
// cout << total[k] << endl;
max_index = total[k] > total[max_index] ? k : max_index;
// total[k] = total[k] - total[max_index];
// summation += total[k];
//max_index = total[k] > total[max_index] ? k : max_index;
}
/*********** Finding Softmax ************************/
/***************** Checking the softmax **********/
float temp = 0;
for(int k = 0; k < classes; ++k) {
temp += total[k];
}
// cout << temp << " ---- " << max_index << endl;
/***************** Checking the softmax **********/
/*********** Finding Error ************************/
// cout << " Error : ";
for(int k = 0; k < classes; k++) {
err[k] = hostNum[k] - total[k];
// cout << " e: " << err[k];
}
// cout << endl;
hipMemcpy(d_err, err, classes * sizeof(float), hipMemcpyHostToDevice);
/*********** Finding Error ************************/
/************* Updating the weights *******************/
// blockSize = 27 * 27;
// numBlocks = (classes + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( updateWeights), dim3(numBlocks), dim3(blockSize), 0, 0, N, d_err, d_w, d_x); // updateWeights(float n, float *err, float *w, float *x)
/************* Updating the weights *******************/
// t = clock() - t;
// t2 = clock();
// double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
// cout << " saxpy took " << time_taken << "seconds to execute on CUDA " << CLOCKS_PER_SEC << " the t1 = " << t1 << " and t2 = " << t2 << endl;
//cout << endl;
difference += time_taken;
diff_cpu += time_taken_by_cpu;
}
cout << iteration << " total time : " << (difference / iteration ) << " " << ( diff_cpu / iteration ) << endl;
//cout << iteration << " total time : " << (difference / iteration ) << endl;
report.close();
image.close();
label.close();
}
/************************** *************************************************
*******************************
*******************************
******************************* LOAD AND UPDATE
*******************************
*******************************
* ********************* **************************************************/
/***************** *****************/
// hipMemcpy(w, d_w, N * classes * sizeof(float), hipMemcpyDeviceToHost);
// for(int k = 0; k < 10; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << (int) ( w[ (j ) * height + (i ) + k * N] * 200);
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
// }
/***************** *****************/
// /***************** Checking the softmax **********/
// float temp = 0;
// for(int k = 0; k < classes; ++k) {
// temp += total[k];
// }
// cout << temp << " " << max_index << endl;
/***************** Checking the softmax **********/
}
|
919c08c09ef2d1b1e89d4281e74f37013ff21d5a.cu
|
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <vector>
#include <set>
#include <iterator>
#include <algorithm>
#include <time.h>
using namespace std;
// Training image file name
const string training_image_fn = "train-images.idx3-ubyte";
// Training label file name
const string training_label_fn = "train-labels.idx1-ubyte";
int classes = 10;
int iteration = 5000;
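// Grid-stride kernel: for each pixel i and each of the 10 classes k, store the
// weighted input w[i + k*n]*x[i] + a into sum[i + k*n] (one n-length block per class).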
__global__
void saxpy(float n, float a, float *x, float *w, float *sum)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("%d", index);
int classes = 10;
for (int i = index; i < n; i += stride)
for(int k = 0; k < classes; k++) {
sum[i + k * (int)n] = w[i + k * (int)n]*x[i] + a;
//sum[i + k * (int)n] = intermediateW[i + k * (int)n];
}
}
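// Grid-stride reduction over one class block of `sum`: each thread atomically
// adds its elements into total[0], which the host reads back as that class's score.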
__global__
void sum_cuda(float n, float *sum, float *total, int run)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
//printf("Index --- %d", index);
//int classes = 1;
	for (int idx = index; idx < n; idx += stride) {
		// Accumulate the weighted inputs of class `run` into total[0];
		// the old partial sum returned by atomicAdd is not needed here.
		atomicAdd(&total[0], sum[idx + run * (int)n]);
	}
//for (int idx = index; idx < classes; idx += stride) {
//printf("i = %d %f\n", i, sum[i]);
// for(int k = 0; k < n; k++) {
// //printf("i = %d %f\n",i, sum[i]);
// //sum[i] += w[i + k * (int)n];
// sum[i] += w[i*(int)n + k];
// //printf("%f\n",sum[i]);
// }
// register int i = atomicAdd(total, sum[idx]);
// sum[i] = idx;
//printf("cuda --- %f\n",sum[i]);
//}
}
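// Per-pixel, per-class weight update: a gradient-style step of size 0.001 in the
// direction err[k]*x[i], plus an L2 weight-decay term on w itself.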
__global__
void updateWeights(float n, float *err, float *w, float *x)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
int classes = 10;
//float a;
for (int i = index; i < n; i += stride)
for(int k = 0; k < classes; k++) {
//printf(" %f ", w[i + k * (int)n] );
//a = w[i + k * (int)n];
w[i + k * (int)n] -= 0.001 * ( ( -1 * err[k] * x[i] ) + w[i + k * (int)n] );
//printf(" %f after %f changes required %f\n", a, w[i + k * (int)n], err[k] );
}
//printf(" after changes required %f\n", err[0] );
//theta[m2][n] += (alpha * (labelTrain[j][m2] - prob[m2]) * dataTrain[j][n]);
}
void softMax(float *sum)
{
float total = 0.0f;
for (int i = 0; i < classes; i += 1)
total += exp(sum[i]);
for (int i = 0; i < classes; i += 1)
sum[i] = exp(sum[i]) / total;
}
// Software: Training Artificial Neural Network for MNIST database
// Author: Hy Truong Son
// Major: BSc. Computer Science
// Class: 2013 - 2016
// Institution: Eotvos Lorand University
// Email: [email protected]
// Website: http://people.inf.elte.hu/hytruongson/
// Copyright 2015 (c). All rights reserved.
// File stream to read data (image, label) and write down a report
ifstream image;
ifstream label;
ofstream report;
// Number of training samples
const int nTraining = 1;
// Image size in MNIST database
const int width = 28;
const int height = 28;
// Image. In MNIST: 28x28 gray scale images.
int d[width][height];
char inputNum;
void input() {
// Reading image
for(int i = 0; i < 1; i++ ) {
for (int j = 0; j < height; ++j) {
for (int i = 0; i < width; ++i) {
image.read(&inputNum, sizeof(char));
if (inputNum == 0) {
d[i][j] = 0;
} else {
d[i][j] = 1;
}
}
}
label.read(&inputNum, sizeof(char));
// cout << "Label:" << (int)inputNum << endl;
}
}
void check(float *sum, int N){
float total = 0.0f;
for(int j = 0; j < N; j++)
total += sum[j];
// cout<<total<< endl;
}
int main( int argc, char *argv[] )
{
float *x, *d_x, *d_w, *w, *sum, *d_sum;
//float total = 0, *d_total = 0;
float *d_index = 0;
float *h_index = 0;
float err[10], *d_err;
int N = width * height;
// cout << "Starting code....... 124" << endl;
x = (float *)malloc( N * sizeof(float));
w = (float *)malloc( N * classes * sizeof(float));
sum = (float *)malloc( N * classes * sizeof(float));
h_index = (float *)malloc( classes * sizeof(float));
//total = (float *)malloc( classes * sizeof(float) );
for(int i = 0; i < classes; i++)
h_index[i] = 0;
for(int conf = 1; conf <= 200; conf++) {
//iteration = conf * 100;
/*********** initializing weights *******************/
for (int i = 0; i < N; i++) {
for(int j = 0; j < classes; j++)
w[i + j * N] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
}
/*************Transfer Data from host to device *********************/
cudaMalloc(&d_x, N * sizeof(float));
cudaMalloc(&d_w, N * classes * sizeof(float));
cudaMalloc(&d_sum, N * classes * sizeof(float));
//cudaMalloc(&d_total, classes * sizeof(float));
cudaMalloc(&d_err, classes * sizeof(float));
cudaMalloc( (void**) &d_index, classes * sizeof(float) );
cudaMemcpy(d_w, w, N * classes * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_sum, sum, N * classes * sizeof(float), cudaMemcpyHostToDevice);
//cudaMemcpy(d_total, total, sizeof(float), cudaMemcpyHostToDevice);
/*************Transfer Data from host to device *********************/
/*********** initializing weights *******************/
/***************** *****************/
//cudaMemcpy(w, d_w, N * classes * sizeof(float), cudaMemcpyDeviceToHost);
// for(int k = 9; k < 10; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << w[ (j ) * height + (i ) + k * N];
// }
// cout << endl;
// }
// }
// cout << endl;
// cout << endl;
// cout << endl;
/***************** *****************/
/************************** *************************************************
*******************************
*******************************
******************************* LOAD AND UPDATE
*******************************
*******************************
* ********************* **************************************************/
image.open(training_image_fn.c_str(), ios::in | ios::binary); // Binary image file
label.open(training_label_fn.c_str(), ios::in | ios::binary ); // Binary label file
// Reading file headers
char number;
for (int i = 1; i <= 16; ++i) {
image.read(&number, sizeof(char));
}
for (int i = 1; i <= 8; ++i) {
label.read(&number, sizeof(char));
}
// int blockSize = conf * 4;
// int numBlocks = (N + blockSize - 1) / blockSize;
// int numBlocks = conf * 4;
// cout << "blockSize" << blockSize << "numBlocks" << numBlocks << endl;
// cout << " calculations for reduction : " << conf << endl;
double difference = 0, diff_cpu = 0;
for(int l = 0; l < iteration; l++) {
// blockSize = conf * 4;
// numBlocks = (N + blockSize - 1) / blockSize;
/***************** Image Loading **********************/
for (int sample = 1; sample <= nTraining; ++sample) {
// cout << "Sample ---------- **************" << sample << endl;
input();
}
for (int i = 0; i < N; i++) {
x[i] = (float)d[i % width][i / width];
}
// cout << "Image:" << endl;
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << x[ (j ) * height + (i )];
// }
// cout << endl;
// }
int hostNum[10];
for(int j = 0; j < classes; j++)
hostNum[j] = 0;
hostNum[(int)inputNum] = 1;
// cout << "Label: ";
// for(int j = 0; j < classes; j++)
// cout << " " << hostNum[j];
// cout << endl;
/***************** Image Loading **********************/
/********* Multiplying ******************/
clock_t t, t1, t2;
// t = clock();
// t1 = clock();
cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
// Compute the per-class weighted inputs over the N = width*height pixels
//int blockSize = 27;
int blockSize = N;
int numBlocks = (N + blockSize - 1) / blockSize;
// saxpy<<<1, 1>>>(N, 0.0f, d_x, d_w, d_sum);
saxpy<<<numBlocks, blockSize>>>(N, 0.0f, d_x, d_w, d_sum);
/********* Multiplying ******************/
/***************** *****************/
// cudaMemcpy(w, d_sum, N * classes * sizeof(float), cudaMemcpyDeviceToHost);
// for(int k = 2; k < 3; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << w[ (j ) * height + (i ) + k * N];
// }
// cout << endl;
// }
// }
// cout << endl;
// cout << endl;
// cout << endl;
/***************** *****************/
/*********** Finding Softmax ************************/
//cudaMemcpy(sum, d_sum, N*classes*sizeof(float), cudaMemcpyDeviceToHost);
blockSize = 27 * 27;
numBlocks = (classes + blockSize - 1) / blockSize;
// blockSize = conf;
// numBlocks = (classes + blockSize - 1) / blockSize;
// numBlocks = 1;
int max_index = 0;
float total[10], summation = 0;
double time_taken = 0;// = ((double)t)/CLOCKS_PER_SEC; // in seconds
for(int k = 0; k < classes; ++k) {
h_index[0] = 0;
cudaMemcpy(d_index, h_index , classes * sizeof(float), cudaMemcpyHostToDevice);
t = clock();
sum_cuda<<<numBlocks, blockSize>>>(N, d_sum, d_index, k);
t = clock() - t;
time_taken += ((double)t)/CLOCKS_PER_SEC;
cudaMemcpy(h_index, d_index, classes * sizeof(float), cudaMemcpyDeviceToHost);
total[k] = h_index[0];
// cout << total[k] << endl;
// check( sum + k * N, N);
//summation += total[k];
max_index = total[k] > total[max_index] ? k : max_index;
}
time_taken /= classes;
t1 = clock();
cudaMemcpy(sum, d_sum, N*classes*sizeof(float), cudaMemcpyDeviceToHost);
check(sum,N);
t2 = clock();
double time_taken_by_cpu = ((double) (t2 - t1) )/CLOCKS_PER_SEC; // in seconds
for(int k = 0; k < classes; ++k) {
//total[k] = total[k] / summation;
// cout << total[k] << endl;
total[k] = total[k] / total[max_index];
summation += total[k];
//max_index = total[k] > total[max_index] ? k : max_index;
}
for(int k = 0; k < classes; ++k) {
total[k] = total[k] / summation;
// cout << total[k] << endl;
max_index = total[k] > total[max_index] ? k : max_index;
// total[k] = total[k] - total[max_index];
// summation += total[k];
//max_index = total[k] > total[max_index] ? k : max_index;
}
/*********** Finding Softmax ************************/
/***************** Checking the softmax **********/
float temp = 0;
for(int k = 0; k < classes; ++k) {
temp += total[k];
}
// cout << temp << " ---- " << max_index << endl;
/***************** Checking the softmax **********/
/*********** Finding Error ************************/
// cout << " Error : ";
for(int k = 0; k < classes; k++) {
err[k] = hostNum[k] - total[k];
// cout << " e: " << err[k];
}
// cout << endl;
cudaMemcpy(d_err, err, classes * sizeof(float), cudaMemcpyHostToDevice);
/*********** Finding Error ************************/
/************* Updating the weights *******************/
// blockSize = 27 * 27;
// numBlocks = (classes + blockSize - 1) / blockSize;
updateWeights<<<numBlocks, blockSize>>>(N, d_err, d_w, d_x); // updateWeights(float n, float *err, float *w, float *x)
/************* Updating the weights *******************/
// t = clock() - t;
// t2 = clock();
// double time_taken = ((double)t)/CLOCKS_PER_SEC; // in seconds
// cout << " saxpy took " << time_taken << "seconds to execute on CUDA " << CLOCKS_PER_SEC << " the t1 = " << t1 << " and t2 = " << t2 << endl;
//cout << endl;
difference += time_taken;
diff_cpu += time_taken_by_cpu;
}
cout << iteration << " total time : " << (difference / iteration ) << " " << ( diff_cpu / iteration ) << endl;
//cout << iteration << " total time : " << (difference / iteration ) << endl;
report.close();
image.close();
label.close();
}
/************************** *************************************************
*******************************
*******************************
******************************* LOAD AND UPDATE
*******************************
*******************************
* ********************* **************************************************/
/***************** *****************/
// cudaMemcpy(w, d_w, N * classes * sizeof(float), cudaMemcpyDeviceToHost);
// for(int k = 0; k < 10; k++) {
// for (int j = 0; j < height; ++j) {
// for (int i = 0; i < width; ++i) {
// cout << " " << (int) ( w[ (j ) * height + (i ) + k * N] * 200);
// }
// cout << endl;
// }
// cout << endl;
// cout << endl;
// }
/***************** *****************/
// /***************** Checking the softmax **********/
// float temp = 0;
// for(int k = 0; k < classes; ++k) {
// temp += total[k];
// }
// cout << temp << " " << max_index << endl;
/***************** Checking the softmax **********/
}
|
e0dd8d4d697033f5f8e5610ff5b4bf3b0e5824de.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
#define NPML 10
const float light_velocity = 2.99792458e8;	// m s-1 (speed of light in vacuum)
const float ep0 = 8.85418781762038920e-12; // F m-1 (permittivity at vacuum)
const float mu0 = 1.25663706143591730e-6; // N A-2 (permeability at vacuum)
const float imp0 = sqrt( mu0/ep0 ); // (impedance at vacuum)
const float pi = 3.14159265358979323846;
const int MBPG = 65535;
const int MTPB = 512;
// Allocate constant memory for CPML
__constant__ float rcmbE[2*(NPML+1)];
__constant__ float rcmaE[2*(NPML+1)];
__constant__ float rcmbH[2*(NPML+1)];
__constant__ float rcmaH[2*(NPML+1)];
typedef struct N3 {
int x, y, z;
} N3;
typedef struct P3F3 {
float ***x, ***y, ***z;
} P3F3;
typedef struct P1F3 {
float *x, *y, *z;
} P1F3;
typedef struct P1F2 {
float *f, *b;
} P1F2;
typedef struct P1F6 {
P1F2 x, y, z;
} P1F6;
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(N3 N, float ***a) {
int j,k;
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
printf("%1.4f\t", a[N.x/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(N3 N) {
float ***f;
f = (float ***) calloc (N.x, sizeof(float **));
f[0] = (float **) calloc (N.y*N.x, sizeof(float *));
f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float));
for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y;
for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z;
return f;
}
__host__ void set_geometry(N3 N, P3F3 CE) {
int i,j,k;
for (i=0; i<N.x-1; i++) {
for (j=0; j<N.y-1; j++) {
for (k=0; k<N.z-1; k++) {
CE.x[i][j][k] = 0.5;
CE.y[i][j][k] = 0.5;
CE.z[i][j][k] = 0.5;
}
}
}
for (j=0; j<N.y-1; j++) for (k=0; k<N.z-1; k++) CE.x[N.x-1][j][k] = 0.5;
for (i=0; i<N.x-1; i++) for (k=0; k<N.z-1; k++) CE.y[i][N.y-1][k] = 0.5;
for (i=0; i<N.x-1; i++) for (j=0; j<N.y-1; j++) CE.z[i][j][N.z-1] = 0.5;
}
__host__ void verify_16xNz(int Nz) {
int R = Nz%16;
int N1 = Nz-R;
int N2 = N1+16;
if ( R == 0 ) printf("Nz is a multiple of 16.\n");
else {
printf("Error: Nz is not a multiple of 16.\n");
printf("Recommend Nz: %d or %d\n", N1, N2);
exit(0);
}
}
__global__ void initArray(int Ntot, float *a, int idx0) {
int idx = idx0 + blockIdx.x*blockDim.x + threadIdx.x;
if ( idx < Ntot ) a[idx] = 0;
}
__host__ void initMainArrays(int Ntot, P1F3 F) {
int i;
int BPG = Ntot%MTPB == 0 ? Ntot/MTPB : Ntot/MTPB + 1;
int NK = BPG/MBPG + 1;
int sBPG = BPG/NK;
int idx0[NK];
dim3 DG[NK];
for ( i=0; i<NK; i++) {
idx0[i] = MTPB*sBPG*i;
DG[i] = dim3(sBPG);
}
DG[NK-1] = dim3(sBPG+BPG%NK);
dim3 DB(MTPB);
for ( i=0; i<NK; i++) {
hipLaunchKernelGGL(( initArray) , dim3(DG[i]),dim3(DB), 0, 0, Ntot, F.x, idx0[i]);
hipLaunchKernelGGL(( initArray) , dim3(DG[i]),dim3(DB), 0, 0, Ntot, F.y, idx0[i]);
hipLaunchKernelGGL(( initArray) , dim3(DG[i]),dim3(DB), 0, 0, Ntot, F.z, idx0[i]);
}
printf("main init: Ntot=%d, BPG=%d, sBPG(%d)=%d\n", Ntot, BPG, NK, sBPG);
}
__host__ void initPsiArrays(int Ntot, int BPG, P1F2 psi1, P1F2 psi2) {
hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(MTPB)), 0, 0, Ntot, psi1.f, 0);
hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(MTPB)), 0, 0, Ntot, psi1.b, 0);
hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(MTPB)), 0, 0, Ntot, psi2.f, 0);
hipLaunchKernelGGL(( initArray) , dim3(dim3(BPG)),dim3(dim3(MTPB)), 0, 0, Ntot, psi2.b, 0);
}
__host__ void freeMainArrays(P1F3 F) {
hipFree(F.x);
hipFree(F.y);
hipFree(F.z);
}
__host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) {
hipFree(psix.y.f);
hipFree(psix.y.b);
hipFree(psix.z.f);
hipFree(psix.z.b);
hipFree(psiy.z.f);
hipFree(psiy.z.b);
hipFree(psiy.x.f);
hipFree(psiy.x.b);
hipFree(psiz.x.f);
hipFree(psiz.x.b);
hipFree(psiz.y.f);
hipFree(psiz.y.b);
}
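// Update the E field over the main region.  Each block stages its Hx/Hy/Hz
// values in shared memory (with one extra slot for the k+1 neighbor) so the
// z-direction finite differences avoid redundant global-memory reads.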
__global__ void updateE(N3 N, P1F3 E, P1F3 H, P1F3 CE, int idx0) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk + idx0;
int Nyz = N.y*N.z;
int eidx = idx + Nyz;
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
hx[tk] = H.x[idx];
hy[tk] = H.y[idx];
hz[tk] = H.z[idx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = H.x[idx+1];
hy[tk+1] = H.y[idx+1];
}
__syncthreads();
E.x[eidx] += CE.x[idx]*( H.z[idx+N.z] - hz[tk] - hy[tk+1] + hy[tk] );
E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] );
E.z[eidx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+N.z] + hx[tk] );
}
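// Update the H field over the main region, mirroring updateE: Ex/Ey/Ez are
// staged in shared memory with one extra slot for the k-1 neighbor.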
__global__ void updateH(N3 N, P1F3 E, P1F3 H, int idx0) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk + idx0;
int Nyz = N.y*N.z;
int eidx = idx + Nyz;
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
ex[tk+1] = E.x[eidx];
ey[tk+1] = E.y[eidx];
ez[tk] = E.z[eidx];
if ( tk==0 ) {
ex[0] = E.x[eidx-1];
ey[0] = E.y[eidx-1];
}
__syncthreads();
H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-N.z] - ey[tk+1] + ey[tk] );
H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyz] );
H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyz] - ex[tk+1] + E.x[eidx-N.z] );
}
__global__ void updateSrc(N3 N, P1F3 E, int tstep) {
int idx = threadIdx.x;
//int ijk = idx*N.y*N.z + (N.y/2)*N.z + (N.z/2);
int ijk = (N.x/2+1)*N.y*N.z + (N.y/2)*N.z + idx;
//E.x[ijk] += sin(0.1*tstep);
E.z[ijk] += sin(0.1*tstep);
}
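// CPML correction of the E field in the x-boundary slabs (backward selects the
// front or back slab).  The recursive-convolution coefficients are read from
// the constant-memory tables rcmbE/rcmaE, and psi1/psi2 carry the running
// convolution terms that correct Ey and Ez.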
__global__ void updateCPMLxE_cmem(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = N.y*N.z;
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + backward*(N.x-NPML-1)*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyz] - H.z[idx] );
E.y[eidx] -= CE.y[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyz] - H.y[idx] );
E.z[eidx] += CE.z[idx]*psi2[pidx];
}
__global__ void updateCPMLxH_cmem(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = N.y*N.z;
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + backward*(N.x-NPML)*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyz] );
H.y[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyz] );
H.z[idx] -= 0.5*psi2[pidx];
}
__global__ void updateCPMLyE_cmem1(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*N.z);
int pj = ( pidx - i*NPML*N.z )/N.z + backward*(NPML+1);
int idx = pidx + (i+backward)*(N.y-NPML)*N.z - backward*N.z;
//int idx = pidx + i*(N.y-NPML)*N.z + backward*(N.y-NPML-1)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] );
E.z[eidx] -= CE.z[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] );
E.x[eidx] += CE.x[idx]*psi2[pidx];
}
__global__ void updateCPMLyH_cmem1(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*N.z);
int pj = ( pidx - i*NPML*N.z )/N.z + backward*(NPML+1);
int idx = pidx + (i+backward)*(N.y-NPML)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] );
H.z[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] );
H.x[idx] -= 0.5*psi2[pidx];
}
__global__ void updateCPMLyE_cmem2(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int j = pidx/(N.x*N.z);
int i = (pidx - j*N.x*N.z)/N.z;
int k = pidx%N.z;
int pj = j + backward*(NPML+1);
int idx = k + (j + i*N.y)*N.z + backward*(N.y-NPML-1)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] );
E.z[eidx] -= CE.z[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] );
E.x[eidx] += CE.x[idx]*psi2[pidx];
}
__global__ void updateCPMLyH_cmem2(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int j = pidx/(N.x*N.z);
int i = (pidx - j*N.x*N.z)/N.z;
int k = pidx%N.z;
int pj = j + backward*(NPML+1);
int idx = k + (j + i*N.y)*N.z + backward*(N.y-NPML)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] );
H.z[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] );
H.x[idx] -= 0.5*psi2[pidx];
}
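// 3-D FDTD driver: E/H updates, the soft sinusoidal source injection, and the
// CPML corrections on the x and y boundary slabs all run on the GPU; the time
// loop below repeats these kernel launches each step.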
int main() {
int tstep;
char time_str[32];
time_t t0;
int i;
// --------------------------------------------------------------------------------
// Set the parameters
N3 N;
N.x = 200;
N.y = 300;
N.z = 208;
//N.y = 16;
//N.z = 20;
int TMAX = 10000;
float S = 0.5;
float dx = 10e-9;
float dt = S*dx/light_velocity;
int Npml = NPML;
printf("Npml=%d\n", Npml);
printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX);
verify_16xNz( N.z );
// --------------------------------------------------------------------------------
// Allocate host memory
P3F3 CE;
CE.x = makeArray(N);
CE.y = makeArray(N);
CE.z = makeArray(N);
float ***Ex, ***Ez;
N3 Nxp;
Nxp.x = N.x+1;
Nxp.y = N.y;
Nxp.z = N.z;
Ex = makeArray(Nxp);
Ez = makeArray(Nxp);
// --------------------------------------------------------------------------------
// Geometry
set_geometry(N, CE);
// --------------------------------------------------------------------------------
// Parameters for CPML
int m = 4; // grade_order
float sigma_max = (m+1.)/(15*pi*Npml*dx);
float alpha = 0.05;
float *sigmaE, *bE, *aE;
float *sigmaH, *bH, *aH;
sigmaE = (float *) calloc (2*(Npml+1), sizeof(float));
sigmaH = (float *) calloc (2*(Npml+1), sizeof(float));
bE = (float *) calloc (2*(Npml+1), sizeof(float));
bH = (float *) calloc (2*(Npml+1), sizeof(float));
aE = (float *) calloc (2*(Npml+1), sizeof(float));
aH = (float *) calloc (2*(Npml+1), sizeof(float));
for (i=0; i<Npml; i++) {
sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max;
sigmaE[i+Npml+1] = pow( (0.5+i)/Npml, m )*sigma_max;
sigmaH[i] = pow( (float)(Npml-i)/Npml, m )*sigma_max;
sigmaH[i+Npml+1] = pow( (1.+i)/Npml, m )*sigma_max;
}
for (i=0; i<2*(Npml+1); i++) {
bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 );
bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 );
aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1);
aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1);
//printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]);
//printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]);
}
bE[Npml] = 0;
bH[Npml] = 0;
aE[2*Npml+1] = 0;
aH[2*Npml+1] = 0;
free(sigmaE);
free(sigmaH);
// --------------------------------------------------------------------------------
// Copy arrays from host to constant memory
	// Copy the full 2*(Npml+1)-entry CPML coefficient tables into constant memory
	hipMemcpyToSymbol(rcmbE, bE, 2*(Npml+1)*sizeof(float));
	hipMemcpyToSymbol(rcmaE, aE, 2*(Npml+1)*sizeof(float));
	hipMemcpyToSymbol(rcmbH, bH, 2*(Npml+1)*sizeof(float));
	hipMemcpyToSymbol(rcmaH, aH, 2*(Npml+1)*sizeof(float));
// --------------------------------------------------------------------------------
// Set the GPU parameters
// TPB: Number of threads per block
// BPG: Number of thread blocks per grid
int BPG;
dim3 DB(MTPB);
// main update
int Ntotmain = N.x*N.y*N.z;
int BPGmain = BPG = Ntotmain%MTPB == 0 ? Ntotmain/MTPB : Ntotmain/MTPB + 1;
int NK = BPG/MBPG + 1; // Number of kernel
int sBPG = BPG/NK;
int idx0[NK];
dim3 DGmain[NK];
for ( i=0; i<NK; i++ ) {
idx0[i] = MTPB*sBPG*i;
DGmain[i] = dim3(sBPG);
}
DGmain[NK-1] = dim3(sBPG+BPG%NK);
size_t NSmain = sizeof(float)*( 2*(MTPB+1)+MTPB );
printf("main: Ntot=%d(%dx%dx%d), BPG=%d, sBPG(%d)=%d, NS=%d\n", Ntotmain, N.x, N.y, N.z, BPGmain, NK, sBPG, NSmain);
// source
// int TPB = N.x;
int TPB = N.z;
BPG = 1;
dim3 DBsrc(TPB);
dim3 DGsrc(BPG);
printf("source: TPB=%d, BPG=%d\n", TPB, BPG);
// cpml
int Ntotpmlx = Npml*N.y*N.z;
int BPGpmlx = Ntotpmlx%MTPB == 0 ? Ntotpmlx/MTPB : Ntotpmlx/MTPB + 1;
dim3 DGpmlx = dim3(BPGpmlx);
printf("pml (x): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmlx, Npml, N.y, N.z, BPGpmlx);
int Ntotpmly = N.x*Npml*N.z;
int BPGpmly = Ntotpmly%MTPB == 0 ? Ntotpmly/MTPB : Ntotpmly/MTPB + 1;
dim3 DGpmly = dim3(BPGpmly);
printf("pml (y): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmly, N.x, Npml, N.z, BPGpmly);
int Npml_pitch = (Npml/16 + 1)*16;
int Ntotpmlz = N.x*N.y*Npml_pitch;
int BPGpmlz = Ntotpmlz%MTPB == 0 ? Ntotpmlz/MTPB : Ntotpmlz/MTPB + 1;
dim3 DGpmlz = dim3(BPGpmlz);
printf("pml (z): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmlz, N.x, N.y, Npml_pitch, BPGpmlz);
// --------------------------------------------------------------------------------
// Allocate device memory
P1F3 devE, devH;
P1F3 devCE;
int surplus, Nthreads;
int N_devF, N_devC;
size_t size_devF, size_devC;
Nthreads = BPGmain*MTPB;
surplus = Nthreads - Ntotmain;
N_devF = Nthreads + N.y*N.z;
size_devF = N_devF*sizeof(float);
N_devC = Nthreads;
size_devC = N_devC*sizeof(float);
printf("surplus main: %d\n", surplus);
hipMalloc ( (void**) &devE.x, size_devF );
hipMalloc ( (void**) &devE.y, size_devF );
hipMalloc ( (void**) &devE.z, size_devF );
hipMalloc ( (void**) &devH.x, size_devF );
hipMalloc ( (void**) &devH.y, size_devF );
hipMalloc ( (void**) &devH.z, size_devF );
hipMalloc ( (void**) &devCE.x, size_devC );
hipMalloc ( (void**) &devCE.y, size_devC );
hipMalloc ( (void**) &devCE.z, size_devC );
// --------------------------------------------------------------------------------
// Allocate device memory for CPML
P1F6 psixE, psiyE, psizE;
P1F6 psixH, psiyH, psizH;
int N_psix, N_psiy, N_psiz;
size_t size_psix, size_psiy, size_psiz;
N_psix = Nthreads = BPGpmlx*MTPB;
size_psix = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmlx;
//printf("Nthreads=%d, Ntotpmlx=%d\n", Nthreads, Ntotpmlx);
printf("surplus pml(x): %d\n", surplus);
hipMalloc ( (void**) &psixE.y.f, size_psix );
hipMalloc ( (void**) &psixE.y.b, size_psix );
hipMalloc ( (void**) &psixE.z.f, size_psix );
hipMalloc ( (void**) &psixE.z.b, size_psix );
hipMalloc ( (void**) &psixH.y.f, size_psix );
hipMalloc ( (void**) &psixH.y.b, size_psix );
hipMalloc ( (void**) &psixH.z.f, size_psix );
hipMalloc ( (void**) &psixH.z.b, size_psix );
N_psiy = Nthreads = BPGpmly*MTPB;
size_psiy = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmly;
printf("surplus pml(y): %d\n", surplus);
hipMalloc ( (void**) &psiyE.z.f, size_psiy );
hipMalloc ( (void**) &psiyE.z.b, size_psiy );
hipMalloc ( (void**) &psiyE.x.f, size_psiy );
hipMalloc ( (void**) &psiyE.x.b, size_psiy );
hipMalloc ( (void**) &psiyH.z.f, size_psiy );
hipMalloc ( (void**) &psiyH.z.b, size_psiy );
hipMalloc ( (void**) &psiyH.x.f, size_psiy );
hipMalloc ( (void**) &psiyH.x.b, size_psiy );
N_psiz = Nthreads = BPGpmlz*MTPB;
size_psiz = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmlz;
printf("surplus pml(z): %d\n", surplus);
hipMalloc ( (void**) &psizE.x.f, size_psiz );
hipMalloc ( (void**) &psizE.x.b, size_psiz );
hipMalloc ( (void**) &psizE.y.f, size_psiz );
hipMalloc ( (void**) &psizE.y.b, size_psiz );
hipMalloc ( (void**) &psizH.x.f, size_psiz );
hipMalloc ( (void**) &psizH.x.b, size_psiz );
hipMalloc ( (void**) &psizH.y.f, size_psiz );
hipMalloc ( (void**) &psizH.y.b, size_psiz );
// --------------------------------------------------------------------------------
// Initialize the device arrays
initMainArrays ( N_devF, devE );
initMainArrays ( N_devF, devH );
initMainArrays ( N_devC, devCE );
initPsiArrays ( N_psix, BPGpmlx, psixE.y, psixE.z );
initPsiArrays ( N_psiy, BPGpmly, psiyE.z, psiyE.x );
initPsiArrays ( N_psiz, BPGpmlz, psizE.x, psizE.y );
initPsiArrays ( N_psix, BPGpmlx, psixH.y, psixH.z );
initPsiArrays ( N_psiy, BPGpmly, psiyH.z, psiyH.x );
initPsiArrays ( N_psiz, BPGpmlz, psizH.x, psizH.y );
// --------------------------------------------------------------------------------
// Copy arrays from host to device
/*
float * tmpCE;
tmpCE = (float *) calloc ( N.x*N.y*N.z + surplus, sizeof(float) );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.x[0][0][i];
hipMemcpy ( devCE.x, tmpCE, size_devC, hipMemcpyHostToDevice );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.y[0][0][i];
hipMemcpy ( devCE.y, tmpCE, size_devC, hipMemcpyHostToDevice );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.z[0][0][i];
hipMemcpy ( devCE.z, tmpCE, size_devC, hipMemcpyHostToDevice );
*/
hipMemcpy ( devCE.x, CE.x[0][0], size_devC, hipMemcpyHostToDevice );
hipMemcpy ( devCE.y, CE.y[0][0], size_devC, hipMemcpyHostToDevice );
hipMemcpy ( devCE.z, CE.z[0][0], size_devC, hipMemcpyHostToDevice );
free(CE.x);
free(CE.y);
free(CE.z);
//free(tmpCE);
// --------------------------------------------------------------------------------
// time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=500; tstep++) {
// E-fields main region update
for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateE) , dim3(DGmain[i]),dim3(DB),NSmain, 0, N, devE, devH, devCE, idx0[i] );
// E-fields CPML region update
hipLaunchKernelGGL(( updateCPMLxE_cmem) , dim3(DGpmlx),dim3(DB), 0, 0, N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0);
hipLaunchKernelGGL(( updateCPMLxE_cmem) , dim3(DGpmlx),dim3(DB), 0, 0, N, devE, devH, devCE, psixE.y.b, psixE.z.b, 1);
hipLaunchKernelGGL(( updateCPMLyE_cmem1) , dim3(DGpmly),dim3(DB), 0, 0, N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0);
hipLaunchKernelGGL(( updateCPMLyE_cmem1) , dim3(DGpmly),dim3(DB), 0, 0, N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1);
//updateCPMLyE_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0);
//updateCPMLyE_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1);
// Source update
hipLaunchKernelGGL(( updateSrc) , dim3(DGsrc),dim3(DBsrc), 0, 0, N, devE, tstep );
// H-fields main region update
for ( i=0; i<NK; i++)hipLaunchKernelGGL(( updateH) , dim3(DGmain[i]),dim3(DB),NSmain, 0, N, devE, devH, idx0[i] );
// H-fields CPML region update
hipLaunchKernelGGL(( updateCPMLxH_cmem) , dim3(DGpmlx),dim3(DB), 0, 0, N, devE, devH, psixH.y.f, psixH.z.f, 0);
hipLaunchKernelGGL(( updateCPMLxH_cmem) , dim3(DGpmlx),dim3(DB), 0, 0, N, devE, devH, psixH.y.b, psixH.z.b, 1);
hipLaunchKernelGGL(( updateCPMLyH_cmem1) , dim3(DGpmly),dim3(DB), 0, 0, N, devE, devH, psiyH.z.f, psiyH.x.f, 0);
hipLaunchKernelGGL(( updateCPMLyH_cmem1) , dim3(DGpmly),dim3(DB), 0, 0, N, devE, devH, psiyH.z.b, psiyH.x.b, 1);
//updateCPMLyH_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.f, psiyH.x.f, 0);
//updateCPMLyH_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.b, psiyH.x.b, 1);
if ( tstep % 50 == 0 ) { // every 50 time steps
// Copy arrays from device to host
//hipMemcpy( Ex[0][0], devE.x, (N.x+1)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost );
hipMemcpy( Ez[0][0], devE.z, (N.x+1)*N.y*N.z*sizeof(float), hipMemcpyDeviceToHost );
//print_array(N, Ex);
//dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
//exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
dumpToH5(N.x+1, N.y, N.z, 0, 0, N.z/2, N.x, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep);
//dumpToH5(N.x+1, N.y, N.z, 0, 0, 0, N.x, N.y-1, 0, Ez, "gpu_png/Ez-%05d.h5", tstep);
exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
free(Ex);
free(Ez);
freeMainArrays ( devE );
freeMainArrays ( devH );
freeMainArrays ( devCE );
freePsiArrays ( psixE, psiyE, psizE );
freePsiArrays ( psixH, psiyH, psizH );
}
|
e0dd8d4d697033f5f8e5610ff5b4bf3b0e5824de.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
#define NPML 10
const float light_velocity = 2.99792458e8; // m s-1 (speed of light in vacuum)
const float ep0 = 8.85418781762038920e-12; // F m-1 (vacuum permittivity)
const float mu0 = 1.25663706143591730e-6; // N A-2 (vacuum permeability)
const float imp0 = sqrt( mu0/ep0 ); // ohm (vacuum impedance)
const float pi = 3.14159265358979323846;
const int MBPG = 65535;
const int MTPB = 512;
// Allocate constant memory for CPML
__constant__ float rcmbE[2*(NPML+1)];
__constant__ float rcmaE[2*(NPML+1)];
__constant__ float rcmbH[2*(NPML+1)];
__constant__ float rcmaH[2*(NPML+1)];
typedef struct N3 {
int x, y, z;
} N3;
typedef struct P3F3 {
float ***x, ***y, ***z;
} P3F3;
typedef struct P1F3 {
float *x, *y, *z;
} P1F3;
typedef struct P1F2 {
float *f, *b;
} P1F2;
typedef struct P1F6 {
P1F2 x, y, z;
} P1F6;
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(N3 N, float ***a) {
int j,k;
for (j=0; j<N.y; j++) {
for (k=0; k<N.z; k++) {
printf("%1.4f\t", a[N.x/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(N3 N) {
float ***f;
f = (float ***) calloc (N.x, sizeof(float **));
f[0] = (float **) calloc (N.y*N.x, sizeof(float *));
f[0][0] = (float *) calloc (N.z*N.y*N.x, sizeof(float));
for (int i=0; i<N.x; i++) f[i] = f[0] + i*N.y;
for (int i=0; i<N.y*N.x; i++) f[0][i] = f[0][0] + i*N.z;
return f;
}
__host__ void set_geometry(N3 N, P3F3 CE) {
int i,j,k;
for (i=0; i<N.x-1; i++) {
for (j=0; j<N.y-1; j++) {
for (k=0; k<N.z-1; k++) {
CE.x[i][j][k] = 0.5;
CE.y[i][j][k] = 0.5;
CE.z[i][j][k] = 0.5;
}
}
}
for (j=0; j<N.y-1; j++) for (k=0; k<N.z-1; k++) CE.x[N.x-1][j][k] = 0.5;
for (i=0; i<N.x-1; i++) for (k=0; k<N.z-1; k++) CE.y[i][N.y-1][k] = 0.5;
for (i=0; i<N.x-1; i++) for (j=0; j<N.y-1; j++) CE.z[i][j][N.z-1] = 0.5;
}
__host__ void verify_16xNz(int Nz) {
int R = Nz%16;
int N1 = Nz-R;
int N2 = N1+16;
if ( R == 0 ) printf("Nz is a multiple of 16.\n");
else {
printf("Error: Nz is not a multiple of 16.\n");
printf("Recommend Nz: %d or %d\n", N1, N2);
exit(0);
}
}
__global__ void initArray(int Ntot, float *a, int idx0) {
int idx = idx0 + blockIdx.x*blockDim.x + threadIdx.x;
if ( idx < Ntot ) a[idx] = 0;
}
__host__ void initMainArrays(int Ntot, P1F3 F) {
int i;
int BPG = Ntot%MTPB == 0 ? Ntot/MTPB : Ntot/MTPB + 1;
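// gridDim.x is limited to MBPG blocks, so the initialization is split into NK sub-launches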
int NK = BPG/MBPG + 1;
int sBPG = BPG/NK;
int idx0[NK];
dim3 DG[NK];
for ( i=0; i<NK; i++) {
idx0[i] = MTPB*sBPG*i;
DG[i] = dim3(sBPG);
}
DG[NK-1] = dim3(sBPG+BPG%NK);
dim3 DB(MTPB);
for ( i=0; i<NK; i++) {
initArray <<<DG[i],DB>>> (Ntot, F.x, idx0[i]);
initArray <<<DG[i],DB>>> (Ntot, F.y, idx0[i]);
initArray <<<DG[i],DB>>> (Ntot, F.z, idx0[i]);
}
printf("main init: Ntot=%d, BPG=%d, sBPG(%d)=%d\n", Ntot, BPG, NK, sBPG);
}
__host__ void initPsiArrays(int Ntot, int BPG, P1F2 psi1, P1F2 psi2) {
initArray <<<dim3(BPG),dim3(MTPB)>>> (Ntot, psi1.f, 0);
initArray <<<dim3(BPG),dim3(MTPB)>>> (Ntot, psi1.b, 0);
initArray <<<dim3(BPG),dim3(MTPB)>>> (Ntot, psi2.f, 0);
initArray <<<dim3(BPG),dim3(MTPB)>>> (Ntot, psi2.b, 0);
}
__host__ void freeMainArrays(P1F3 F) {
cudaFree(F.x);
cudaFree(F.y);
cudaFree(F.z);
}
__host__ void freePsiArrays(P1F6 psix, P1F6 psiy, P1F6 psiz) {
cudaFree(psix.y.f);
cudaFree(psix.y.b);
cudaFree(psix.z.f);
cudaFree(psix.z.b);
cudaFree(psiy.z.f);
cudaFree(psiy.z.b);
cudaFree(psiy.x.f);
cudaFree(psiy.x.b);
cudaFree(psiz.x.f);
cudaFree(psiz.x.b);
cudaFree(psiz.y.f);
cudaFree(psiz.y.b);
}
__global__ void updateE(N3 N, P1F3 E, P1F3 H, P1F3 CE, int idx0) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk + idx0;
int Nyz = N.y*N.z;
int eidx = idx + Nyz;
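// Shared-memory tile of the H fields: hx and hy carry one extra element at the high end,
// so the block needs 2*(blockDim.x+1)+blockDim.x floats (= NSmain)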
extern __shared__ float hs[];
float* hx = (float*) hs;
float* hy = (float*) &hx[blockDim.x+1];
float* hz = (float*) &hy[blockDim.x+1];
hx[tk] = H.x[idx];
hy[tk] = H.y[idx];
hz[tk] = H.z[idx];
if ( tk==blockDim.x-1 ) {
hx[tk+1] = H.x[idx+1];
hy[tk+1] = H.y[idx+1];
}
__syncthreads();
E.x[eidx] += CE.x[idx]*( H.z[idx+N.z] - hz[tk] - hy[tk+1] + hy[tk] );
E.y[eidx] += CE.y[idx]*( hx[tk+1] - hx[tk] - H.z[idx+Nyz] + hz[tk] );
E.z[eidx] += CE.z[idx]*( H.y[idx+Nyz] - hy[tk] - H.x[idx+N.z] + hx[tk] );
}
__global__ void updateH(N3 N, P1F3 E, P1F3 H, int idx0) {
int tk = threadIdx.x;
int idx = blockIdx.x*blockDim.x + tk + idx0;
int Nyz = N.y*N.z;
int eidx = idx + Nyz;
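// Shared-memory tile of the E fields: ex and ey keep one halo element at the low end, loaded by thread 0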
extern __shared__ float es[];
float* ex = (float*) es;
float* ey = (float*) &ex[blockDim.x+1];
float* ez = (float*) &ey[blockDim.x+1];
ex[tk+1] = E.x[eidx];
ey[tk+1] = E.y[eidx];
ez[tk] = E.z[eidx];
if ( tk==0 ) {
ex[0] = E.x[eidx-1];
ey[0] = E.y[eidx-1];
}
__syncthreads();
H.x[idx] -= 0.5*( ez[tk] - E.z[eidx-N.z] - ey[tk+1] + ey[tk] );
H.y[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + E.z[eidx-Nyz] );
H.z[idx] -= 0.5*( ey[tk+1] - E.y[eidx-Nyz] - ex[tk+1] + E.x[eidx-N.z] );
}
__global__ void updateSrc(N3 N, P1F3 E, int tstep) {
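// Soft source: each thread adds a sinusoid to Ez along a line in z near the centre of the grid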
int idx = threadIdx.x;
//int ijk = idx*N.y*N.z + (N.y/2)*N.z + (N.z/2);
int ijk = (N.x/2+1)*N.y*N.z + (N.y/2)*N.z + idx;
//E.x[ijk] += sin(0.1*tstep);
E.z[ijk] += sin(0.1*tstep);
}
__global__ void updateCPMLxE_cmem(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = N.y*N.z;
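// pidx runs over the Npml*N.y*N.z PML cells; pi selects the graded coefficient
// (second half of the table for the back slab) and idx maps the cell into the full field arrays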
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + backward*(N.x-NPML-1)*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbE[pi]*psi1[pidx] + rcmaE[pi]*( H.z[idx+Nyz] - H.z[idx] );
E.y[eidx] -= CE.y[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pi]*psi2[pidx] + rcmaE[pi]*( H.y[idx+Nyz] - H.y[idx] );
E.z[eidx] += CE.z[idx]*psi2[pidx];
}
__global__ void updateCPMLxH_cmem(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int Nyz = N.y*N.z;
int pi = pidx/Nyz + backward*(NPML+1);
int idx = pidx + backward*(N.x-NPML)*Nyz;
int eidx = idx + Nyz;
psi1[pidx] = rcmbH[pi]*psi1[pidx] + rcmaH[pi]*( E.z[eidx] - E.z[eidx-Nyz] );
H.y[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pi]*psi2[pidx] + rcmaH[pi]*( E.y[eidx] - E.y[eidx-Nyz] );
H.z[idx] -= 0.5*psi2[pidx];
}
__global__ void updateCPMLyE_cmem1(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*N.z);
int pj = ( pidx - i*NPML*N.z )/N.z + backward*(NPML+1);
int idx = pidx + (i+backward)*(N.y-NPML)*N.z - backward*N.z;
//int idx = pidx + i*(N.y-NPML)*N.z + backward*(N.y-NPML-1)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] );
E.z[eidx] -= CE.z[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] );
E.x[eidx] += CE.x[idx]*psi2[pidx];
}
__global__ void updateCPMLyH_cmem1(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int i = pidx/(NPML*N.z);
int pj = ( pidx - i*NPML*N.z )/N.z + backward*(NPML+1);
int idx = pidx + (i+backward)*(N.y-NPML)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] );
H.z[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] );
H.x[idx] -= 0.5*psi2[pidx];
}
__global__ void updateCPMLyE_cmem2(N3 N, P1F3 E, P1F3 H, P1F3 CE, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int j = pidx/(N.x*N.z);
int i = (pidx - j*N.x*N.z)/N.z;
int k = pidx%N.z;
int pj = j + backward*(NPML+1);
int idx = k + (j + i*N.y)*N.z + backward*(N.y-NPML-1)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbE[pj]*psi1[pidx] + rcmaE[pj]*( H.x[idx+N.z] - H.x[idx] );
E.z[eidx] -= CE.z[idx]*psi1[pidx];
psi2[pidx] = rcmbE[pj]*psi2[pidx] + rcmaE[pj]*( H.z[idx+N.z] - H.z[idx] );
E.x[eidx] += CE.x[idx]*psi2[pidx];
}
__global__ void updateCPMLyH_cmem2(N3 N, P1F3 E, P1F3 H, float *psi1, float *psi2, int backward) {
int pidx = blockIdx.x*blockDim.x + threadIdx.x;
int j = pidx/(N.x*N.z);
int i = (pidx - j*N.x*N.z)/N.z;
int k = pidx%N.z;
int pj = j + backward*(NPML+1);
int idx = k + (j + i*N.y)*N.z + backward*(N.y-NPML)*N.z;
int eidx = idx + N.y*N.z;
psi1[pidx] = rcmbH[pj]*psi1[pidx] + rcmaH[pj]*( E.x[eidx] - E.x[eidx-N.z] );
H.z[idx] += 0.5*psi1[pidx];
psi2[pidx] = rcmbH[pj]*psi2[pidx] + rcmaH[pj]*( E.z[eidx] - E.z[eidx-N.z] );
H.x[idx] -= 0.5*psi2[pidx];
}
int main() {
int tstep;
char time_str[32];
time_t t0;
int i;
// --------------------------------------------------------------------------------
// Set the parameters
N3 N;
N.x = 200;
N.y = 300;
N.z = 208;
//N.y = 16;
//N.z = 20;
int TMAX = 10000;
float S = 0.5;
float dx = 10e-9;
float dt = S*dx/light_velocity;
int Npml = NPML;
printf("Npml=%d\n", Npml);
printf("N(%d,%d,%d), TMAX=%d\n", N.x, N.y, N.z, TMAX);
verify_16xNz( N.z );
// --------------------------------------------------------------------------------
// Allocate host memory
P3F3 CE;
CE.x = makeArray(N);
CE.y = makeArray(N);
CE.z = makeArray(N);
float ***Ex, ***Ez;
N3 Nxp;
Nxp.x = N.x+1;
Nxp.y = N.y;
Nxp.z = N.z;
Ex = makeArray(Nxp);
Ez = makeArray(Nxp);
// --------------------------------------------------------------------------------
// Geometry
set_geometry(N, CE);
// --------------------------------------------------------------------------------
// Parameters for CPML
int m = 4; // grade_order
float sigma_max = (m+1.)/(15*pi*Npml*dx);
float alpha = 0.05;
float *sigmaE, *bE, *aE;
float *sigmaH, *bH, *aH;
sigmaE = (float *) calloc (2*(Npml+1), sizeof(float));
sigmaH = (float *) calloc (2*(Npml+1), sizeof(float));
bE = (float *) calloc (2*(Npml+1), sizeof(float));
bH = (float *) calloc (2*(Npml+1), sizeof(float));
aE = (float *) calloc (2*(Npml+1), sizeof(float));
aH = (float *) calloc (2*(Npml+1), sizeof(float));
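// Grade the PML conductivity with an order-m polynomial: the first Npml+1 table entries
// describe the front boundary, the last Npml+1 the back one, with E and H sampled half a cell apart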
for (i=0; i<Npml; i++) {
sigmaE[i] = pow( (Npml-0.5-i)/Npml, m )*sigma_max;
sigmaE[i+Npml+1] = pow( (0.5+i)/Npml, m )*sigma_max;
sigmaH[i] = pow( (float)(Npml-i)/Npml, m )*sigma_max;
sigmaH[i+Npml+1] = pow( (1.+i)/Npml, m )*sigma_max;
}
for (i=0; i<2*(Npml+1); i++) {
bE[i] = exp( -(sigmaE[i] + alpha)*dt/ep0 );
bH[i] = exp( -(sigmaH[i] + alpha)*dt/ep0 );
aE[i] = sigmaE[i]/(sigmaE[i]+alpha)*(bE[i]-1);
aH[i] = sigmaH[i]/(sigmaH[i]+alpha)*(bH[i]-1);
//printf("[%d]\tsigmaE=%g,\tbE=%g,aE=%g\n", i, sigmaE[i], bE[i], aE[i]);
//printf("[%d]\tsigmaH=%g,\tbH=%g,aH=%g\n", i, sigmaH[i], bH[i], aH[i]);
}
bE[Npml] = 0;
bH[Npml] = 0;
aE[2*Npml+1] = 0;
aH[2*Npml+1] = 0;
free(sigmaE);
free(sigmaH);
// --------------------------------------------------------------------------------
// Copy arrays from host to constant memory
cudaMemcpyToSymbol(rcmbE, bE, 2*Npml*sizeof(float));
cudaMemcpyToSymbol(rcmaE, aE, 2*Npml*sizeof(float));
cudaMemcpyToSymbol(rcmbH, bH, 2*Npml*sizeof(float));
cudaMemcpyToSymbol(rcmaH, aH, 2*Npml*sizeof(float));
// --------------------------------------------------------------------------------
// Set the GPU parameters
// TPB: Number of threads per block
// BPG: Number of thread blocks per grid
int BPG;
dim3 DB(MTPB);
// main update
int Ntotmain = N.x*N.y*N.z;
int BPGmain = BPG = Ntotmain%MTPB == 0 ? Ntotmain/MTPB : Ntotmain/MTPB + 1;
int NK = BPG/MBPG + 1; // Number of kernel launches (gridDim.x may not exceed MBPG blocks)
int sBPG = BPG/NK;
int idx0[NK];
dim3 DGmain[NK];
for ( i=0; i<NK; i++ ) {
idx0[i] = MTPB*sBPG*i;
DGmain[i] = dim3(sBPG);
}
DGmain[NK-1] = dim3(sBPG+BPG%NK);
size_t NSmain = sizeof(float)*( 2*(MTPB+1)+MTPB );
printf("main: Ntot=%d(%dx%dx%d), BPG=%d, sBPG(%d)=%d, NS=%d\n", Ntotmain, N.x, N.y, N.z, BPGmain, NK, sBPG, NSmain);
// source
// int TPB = N.x;
int TPB = N.z;
BPG = 1;
dim3 DBsrc(TPB);
dim3 DGsrc(BPG);
printf("source: TPB=%d, BPG=%d\n", TPB, BPG);
// cpml
int Ntotpmlx = Npml*N.y*N.z;
int BPGpmlx = Ntotpmlx%MTPB == 0 ? Ntotpmlx/MTPB : Ntotpmlx/MTPB + 1;
dim3 DGpmlx = dim3(BPGpmlx);
printf("pml (x): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmlx, Npml, N.y, N.z, BPGpmlx);
int Ntotpmly = N.x*Npml*N.z;
int BPGpmly = Ntotpmly%MTPB == 0 ? Ntotpmly/MTPB : Ntotpmly/MTPB + 1;
dim3 DGpmly = dim3(BPGpmly);
printf("pml (y): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmly, N.x, Npml, N.z, BPGpmly);
int Npml_pitch = (Npml/16 + 1)*16;
int Ntotpmlz = N.x*N.y*Npml_pitch;
int BPGpmlz = Ntotpmlz%MTPB == 0 ? Ntotpmlz/MTPB : Ntotpmlz/MTPB + 1;
dim3 DGpmlz = dim3(BPGpmlz);
printf("pml (z): Ntot=%d(%dx%dx%d), BPG=%d\n", Ntotpmlz, N.x, N.y, Npml_pitch, BPGpmlz);
// --------------------------------------------------------------------------------
// Allocate device memory
P1F3 devE, devH;
P1F3 devCE;
int surplus, Nthreads;
int N_devF, N_devC;
size_t size_devF, size_devC;
Nthreads = BPGmain*MTPB;
surplus = Nthreads - Ntotmain;
N_devF = Nthreads + N.y*N.z;
size_devF = N_devF*sizeof(float);
N_devC = Nthreads;
size_devC = N_devC*sizeof(float);
printf("surplus main: %d\n", surplus);
cudaMalloc ( (void**) &devE.x, size_devF );
cudaMalloc ( (void**) &devE.y, size_devF );
cudaMalloc ( (void**) &devE.z, size_devF );
cudaMalloc ( (void**) &devH.x, size_devF );
cudaMalloc ( (void**) &devH.y, size_devF );
cudaMalloc ( (void**) &devH.z, size_devF );
cudaMalloc ( (void**) &devCE.x, size_devC );
cudaMalloc ( (void**) &devCE.y, size_devC );
cudaMalloc ( (void**) &devCE.z, size_devC );
// --------------------------------------------------------------------------------
// Allocate device memory for CPML
P1F6 psixE, psiyE, psizE;
P1F6 psixH, psiyH, psizH;
int N_psix, N_psiy, N_psiz;
size_t size_psix, size_psiy, size_psiz;
N_psix = Nthreads = BPGpmlx*MTPB;
size_psix = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmlx;
//printf("Nthreads=%d, Ntotpmlx=%d\n", Nthreads, Ntotpmlx);
printf("surplus pml(x): %d\n", surplus);
cudaMalloc ( (void**) &psixE.y.f, size_psix );
cudaMalloc ( (void**) &psixE.y.b, size_psix );
cudaMalloc ( (void**) &psixE.z.f, size_psix );
cudaMalloc ( (void**) &psixE.z.b, size_psix );
cudaMalloc ( (void**) &psixH.y.f, size_psix );
cudaMalloc ( (void**) &psixH.y.b, size_psix );
cudaMalloc ( (void**) &psixH.z.f, size_psix );
cudaMalloc ( (void**) &psixH.z.b, size_psix );
N_psiy = Nthreads = BPGpmly*MTPB;
size_psiy = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmly;
printf("surplus pml(y): %d\n", surplus);
cudaMalloc ( (void**) &psiyE.z.f, size_psiy );
cudaMalloc ( (void**) &psiyE.z.b, size_psiy );
cudaMalloc ( (void**) &psiyE.x.f, size_psiy );
cudaMalloc ( (void**) &psiyE.x.b, size_psiy );
cudaMalloc ( (void**) &psiyH.z.f, size_psiy );
cudaMalloc ( (void**) &psiyH.z.b, size_psiy );
cudaMalloc ( (void**) &psiyH.x.f, size_psiy );
cudaMalloc ( (void**) &psiyH.x.b, size_psiy );
N_psiz = Nthreads = BPGpmlz*MTPB;
size_psiz = Nthreads*sizeof(float);
surplus = Nthreads - Ntotpmlz;
printf("surplus pml(z): %d\n", surplus);
cudaMalloc ( (void**) &psizE.x.f, size_psiz );
cudaMalloc ( (void**) &psizE.x.b, size_psiz );
cudaMalloc ( (void**) &psizE.y.f, size_psiz );
cudaMalloc ( (void**) &psizE.y.b, size_psiz );
cudaMalloc ( (void**) &psizH.x.f, size_psiz );
cudaMalloc ( (void**) &psizH.x.b, size_psiz );
cudaMalloc ( (void**) &psizH.y.f, size_psiz );
cudaMalloc ( (void**) &psizH.y.b, size_psiz );
// --------------------------------------------------------------------------------
// Initialize the device arrays
initMainArrays ( N_devF, devE );
initMainArrays ( N_devF, devH );
initMainArrays ( N_devC, devCE );
initPsiArrays ( N_psix, BPGpmlx, psixE.y, psixE.z );
initPsiArrays ( N_psiy, BPGpmly, psiyE.z, psiyE.x );
initPsiArrays ( N_psiz, BPGpmlz, psizE.x, psizE.y );
initPsiArrays ( N_psix, BPGpmlx, psixH.y, psixH.z );
initPsiArrays ( N_psiy, BPGpmly, psiyH.z, psiyH.x );
initPsiArrays ( N_psiz, BPGpmlz, psizH.x, psizH.y );
// --------------------------------------------------------------------------------
// Copy arrays from host to device
/*
float * tmpCE;
tmpCE = (float *) calloc ( N.x*N.y*N.z + surplus, sizeof(float) );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.x[0][0][i];
cudaMemcpy ( devCE.x, tmpCE, size_devC, cudaMemcpyHostToDevice );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.y[0][0][i];
cudaMemcpy ( devCE.y, tmpCE, size_devC, cudaMemcpyHostToDevice );
for ( i=0; i<N.x*N.y*N.z; i++ ) tmpCE[i] = CE.z[0][0][i];
cudaMemcpy ( devCE.z, tmpCE, size_devC, cudaMemcpyHostToDevice );
*/
cudaMemcpy ( devCE.x, CE.x[0][0], size_devC, cudaMemcpyHostToDevice );
cudaMemcpy ( devCE.y, CE.y[0][0], size_devC, cudaMemcpyHostToDevice );
cudaMemcpy ( devCE.z, CE.z[0][0], size_devC, cudaMemcpyHostToDevice );
free(CE.x);
free(CE.y);
free(CE.z);
//free(tmpCE);
// --------------------------------------------------------------------------------
// time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=500; tstep++) {
// E-fields main region update
for ( i=0; i<NK; i++) updateE <<<DGmain[i],DB,NSmain>>> ( N, devE, devH, devCE, idx0[i] );
// E-fields CPML region update
updateCPMLxE_cmem <<<DGpmlx,DB>>> ( N, devE, devH, devCE, psixE.y.f, psixE.z.f, 0);
updateCPMLxE_cmem <<<DGpmlx,DB>>> ( N, devE, devH, devCE, psixE.y.b, psixE.z.b, 1);
updateCPMLyE_cmem1 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0);
updateCPMLyE_cmem1 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1);
//updateCPMLyE_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.f, psiyE.x.f, 0);
//updateCPMLyE_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, devCE, psiyE.z.b, psiyE.x.b, 1);
// Source update
updateSrc <<<DGsrc,DBsrc>>> ( N, devE, tstep );
// H-fields main region update
for ( i=0; i<NK; i++) updateH <<<DGmain[i],DB,NSmain>>> ( N, devE, devH, idx0[i] );
// H-fields CPML region update
updateCPMLxH_cmem <<<DGpmlx,DB>>> ( N, devE, devH, psixH.y.f, psixH.z.f, 0);
updateCPMLxH_cmem <<<DGpmlx,DB>>> ( N, devE, devH, psixH.y.b, psixH.z.b, 1);
updateCPMLyH_cmem1 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.f, psiyH.x.f, 0);
updateCPMLyH_cmem1 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.b, psiyH.x.b, 1);
//updateCPMLyH_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.f, psiyH.x.f, 0);
//updateCPMLyH_cmem2 <<<DGpmly,DB>>> ( N, devE, devH, psiyH.z.b, psiyH.x.b, 1);
if ( tstep % 50 == 0 ) { // every 50 time steps
// Copy arrays from device to host
//cudaMemcpy( Ex[0][0], devE.x, (N.x+1)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost );
cudaMemcpy( Ez[0][0], devE.z, (N.x+1)*N.y*N.z*sizeof(float), cudaMemcpyDeviceToHost );
//print_array(N, Ex);
//dumpToH5(N.x+1, N.y, N.z, N.x/2, 0, 0, N.x/2, N.y-1, N.z-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
//exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
dumpToH5(N.x+1, N.y, N.z, 0, 0, N.z/2, N.x, N.y-1, N.z/2, Ez, "gpu_png/Ez-%05d.h5", tstep);
//dumpToH5(N.x+1, N.y, N.z, 0, 0, 0, N.x, N.y-1, 0, Ez, "gpu_png/Ez-%05d.h5", tstep);
exec("h5topng -ZM0.1 -z0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ez-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
free(Ex);
free(Ez);
freeMainArrays ( devE );
freeMainArrays ( devH );
freeMainArrays ( devCE );
freePsiArrays ( psixE, psiyE, psizE );
freePsiArrays ( psixH, psiyH, psizH );
}
|
8595abe3c626d266e5ccb7d9cc99179620027ca2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define N 4
#define TAG 0
#define RHO 0.5 // related to pitch
#define ETA 2e-4 // related to duration of sound
#define BOUNDARY_GAIN 0.75 // clamped edge vs free edge
__global__ void process(float * u, float * u1, float * u2, int T){
//center
float sum_of_neighbors, previous_value, previous_previous_value;
int i = threadIdx.x/4;
int j = threadIdx.x%4;
int tid = (i * 4) + j; // Indexes
//[((i-1)*4)+j]
//[(i*4)+(j-1)]
int t;
for (t = 0; t < T; t++) {
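// Finite-difference update of the damped 2-D wave equation on the four interior cells
// of the 4x4 membrane (RHO: propagation term, ETA: damping)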
if(tid == 5 || tid == 6 || tid == 9 || tid == 10){
sum_of_neighbors = u1[((i-1)*4)+j] + u1[((i+1)*4)+j] + u1[(i*4)+(j-1)] + u1[(i*4)+(j+1)];
previous_value = u1[tid];
previous_previous_value = u2[tid];
u[tid] = (RHO * (sum_of_neighbors -4*previous_value) + 2*previous_value -(1-ETA)*previous_previous_value)/(1+ETA);
}
__syncthreads();
// update side points
// 0 * 4) + j
if(tid == 1 || tid == 2 || tid == 4 || tid == 8 || tid == 7 || tid == 11 || tid == 13 || tid == 14){
u[i] = BOUNDARY_GAIN * u[4+i]; // top
u[((N-1) * 4) + i] = BOUNDARY_GAIN * u[(N-2)*4 + i]; // bottom
u[i*4] = BOUNDARY_GAIN * u[i*4 + 1]; // left
u[i*4 + N-1] = BOUNDARY_GAIN * u[i*4 + N-2]; // right
}
__syncthreads();
// update corners
if(tid == 0 || tid == 3 || tid == 12 || tid == 15){
u[0] = BOUNDARY_GAIN * u[4];
u[(N-1)*4] = BOUNDARY_GAIN * u[(N-2)*4];
u[N-1] = BOUNDARY_GAIN * u[N-2];
u[(N-1)*4 + N-1] = BOUNDARY_GAIN * u[4*(N-1)+N-2];
}
if(tid == 10)
printf("%f,\n", u[10]);
float * temp;
temp = u2;
u2 = u1;
u1 = u;
u = temp;
}
}
int main(int argc, char *argv[]){
//Get number of iterations
int T = atoi(argv[1]);
const int ARRAY_SIZE = 16;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float u[ARRAY_SIZE];
float u1[ARRAY_SIZE];
float u2[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
u[i] = 0.0;
u1[i] = 0.0;
u2[i] = 0.0;
}
u1[10] = 1.0;
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_u;
float * d_u1;
float * d_u2;
// allocate GPU memory
hipMalloc(&d_u, ARRAY_BYTES);
hipMalloc(&d_u1, ARRAY_BYTES);
hipMalloc(&d_u2, ARRAY_BYTES);
// transfer the array to the GPU
hipMemcpy(d_u, u, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_u1, u1, ARRAY_BYTES, hipMemcpyHostToDevice);
hipMemcpy(d_u2, u2, ARRAY_BYTES, hipMemcpyHostToDevice);
// launch the kernel
int threadsPerBlock = 16;
int numBlocks = 1;
clock_t tic = clock();
hipLaunchKernelGGL(( process), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, d_u, d_u1, d_u2, T);
hipDeviceSynchronize();
// copy back the result array to the CPU
hipMemcpy(h_out, d_u, ARRAY_BYTES, hipMemcpyDeviceToHost);
hipFree(d_u);
hipFree(d_u1);
hipFree(d_u2);
clock_t toc = clock();
printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC);
return 0;
}
|
8595abe3c626d266e5ccb7d9cc99179620027ca2.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define N 4
#define TAG 0
#define RHO 0.5 // related to pitch
#define ETA 2e-4 // related to duration of sound
#define BOUNDARY_GAIN 0.75 // clamped edge vs free edge
__global__ void process(float * u, float * u1, float * u2, int T){
//center
float sum_of_neighbors, previous_value, previous_previous_value;
int i = threadIdx.x/4;
int j = threadIdx.x%4;
int tid = (i * 4) + j; // Indexes
//[((i-1)*4)+j]
//[(i*4)+(j-1)]
int t;
for (t = 0; t < T; t++) {
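// Finite-difference update of the damped 2-D wave equation on the four interior cells
// of the 4x4 membrane (RHO: propagation term, ETA: damping)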
if(tid == 5 || tid == 6 || tid == 9 || tid == 10){
sum_of_neighbors = u1[((i-1)*4)+j] + u1[((i+1)*4)+j] + u1[(i*4)+(j-1)] + u1[(i*4)+(j+1)];
previous_value = u1[tid];
previous_previous_value = u2[tid];
u[tid] = (RHO * (sum_of_neighbors -4*previous_value) + 2*previous_value -(1-ETA)*previous_previous_value)/(1+ETA);
}
__syncthreads();
// update side points
// 0 * 4) + j
if(tid == 1 || tid == 2 || tid == 4 || tid == 8 || tid == 7 || tid == 11 || tid == 13 || tid == 14){
u[i] = BOUNDARY_GAIN * u[4+i]; // top
u[((N-1) * 4) + i] = BOUNDARY_GAIN * u[(N-2)*4 + i]; // bottom
u[i*4] = BOUNDARY_GAIN * u[i*4 + 1]; // left
u[i*4 + N-1] = BOUNDARY_GAIN * u[i*4 + N-2]; // right
}
__syncthreads();
// update corners
if(tid == 0 || tid == 3 || tid == 12 || tid == 15){
u[0] = BOUNDARY_GAIN * u[4];
u[(N-1)*4] = BOUNDARY_GAIN * u[(N-2)*4];
u[N-1] = BOUNDARY_GAIN * u[N-2];
u[(N-1)*4 + N-1] = BOUNDARY_GAIN * u[4*(N-1)+N-2];
}
if(tid == 10)
printf("%f,\n", u[10]);
float * temp;
temp = u2;
u2 = u1;
u1 = u;
u = temp;
}
}
int main(int argc, char *argv[]){
//Get number of iterations
int T = atoi(argv[1]);
const int ARRAY_SIZE = 16;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float u[ARRAY_SIZE];
float u1[ARRAY_SIZE];
float u2[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
u[i] = 0.0;
u1[i] = 0.0;
u2[i] = 0.0;
}
u1[10] = 1.0;
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d_u;
float * d_u1;
float * d_u2;
// allocate GPU memory
cudaMalloc(&d_u, ARRAY_BYTES);
cudaMalloc(&d_u1, ARRAY_BYTES);
cudaMalloc(&d_u2, ARRAY_BYTES);
// transfer the array to the GPU
cudaMemcpy(d_u, u, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_u1, u1, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_u2, u2, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel
int threadsPerBlock = 16;
int numBlocks = 1;
clock_t tic = clock();
process<<<numBlocks,threadsPerBlock>>>(d_u, d_u1, d_u2, T);
cudaDeviceSynchronize();
// copy back the result array to the CPU
cudaMemcpy(h_out, d_u, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaFree(d_u);
cudaFree(d_u1);
cudaFree(d_u2);
clock_t toc = clock();
printf("Elapsed: %f seconds\n", (double)(toc - tic) / CLOCKS_PER_SEC);
return 0;
}
|
ae87ddfa8fcc933f503814a81c4ccac7101f8854.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "addition.cuh"
__global__ void addition_kernel(int *d_a, int *d_b, int *d_c){
*d_c = *d_a + *d_b;
}
Addition::Addition(ros::NodeHandle *nh){
this->nh = nh;
this->sub_a = nh->subscribe("/cuda/input/a", 100, &Addition::callbackA, this);
this->sub_b = nh->subscribe("/cuda/input/b", 100, &Addition::callbackB, this);
}
Addition::~Addition(){
free(h_a);
free(h_b);
free(h_c);
}
void Addition::callbackA(const std_msgs::Int32 msg){
*this->h_a = msg.data;
copyInputToDevice();
runKernelGPU();
copyOutputToHost();
ROS_INFO("C = %d", *h_c);
}
void Addition::callbackB(const std_msgs::Int32 msg){
*this->h_b = msg.data;
copyInputToDevice();
runKernelGPU();
copyOutputToHost();
ROS_INFO("C = %d", *h_c);
}
void Addition::allocateDeviceMemory(){
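// Note: despite its name, this also allocates and zero-initializes the host-side scalars used by the callbacks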
h_a = (int*) malloc(sizeof(int)); *h_a = 0;
h_b = (int*) malloc(sizeof(int)); *h_b = 0;
h_c = (int*) malloc(sizeof(int)); *h_c = 0;
hipMalloc((void**) &d_a, sizeof(int));
hipMalloc((void**) &d_b, sizeof(int));
hipMalloc((void**) &d_c, sizeof(int));
}
void Addition::copyInputToDevice(){
hipMemcpy(d_a, h_a, sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, sizeof(int), hipMemcpyHostToDevice);
}
void Addition::runKernelGPU(){
hipLaunchKernelGGL(( addition_kernel), dim3(1),dim3(1), 0, 0, d_a, d_b, d_c);
}
void Addition::copyOutputToHost(){
hipMemcpy(h_c, d_c, sizeof(int), hipMemcpyDeviceToHost);
}
void Addition::freeDeviceMemeory(){
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
}
|
ae87ddfa8fcc933f503814a81c4ccac7101f8854.cu
|
#include "addition.cuh"
__global__ void addition_kernel(int *d_a, int *d_b, int *d_c){
*d_c = *d_a + *d_b;
}
Addition::Addition(ros::NodeHandle *nh){
this->nh = nh;
this->sub_a = nh->subscribe("/cuda/input/a", 100, &Addition::callbackA, this);
this->sub_b = nh->subscribe("/cuda/input/b", 100, &Addition::callbackB, this);
}
Addition::~Addition(){
free(h_a);
free(h_b);
free(h_c);
}
void Addition::callbackA(const std_msgs::Int32 msg){
*this->h_a = msg.data;
copyInputToDevice();
runKernelGPU();
copyOutputToHost();
ROS_INFO("C = %d", *h_c);
}
void Addition::callbackB(const std_msgs::Int32 msg){
*this->h_b = msg.data;
copyInputToDevice();
runKernelGPU();
copyOutputToHost();
ROS_INFO("C = %d", *h_c);
}
void Addition::allocateDeviceMemory(){
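// Note: despite its name, this also allocates and zero-initializes the host-side scalars used by the callbacks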
h_a = (int*) malloc(sizeof(int)); *h_a = 0;
h_b = (int*) malloc(sizeof(int)); *h_b = 0;
h_c = (int*) malloc(sizeof(int)); *h_c = 0;
cudaMalloc((void**) &d_a, sizeof(int));
cudaMalloc((void**) &d_b, sizeof(int));
cudaMalloc((void**) &d_c, sizeof(int));
}
void Addition::copyInputToDevice(){
cudaMemcpy(d_a, h_a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(int), cudaMemcpyHostToDevice);
}
void Addition::runKernelGPU(){
addition_kernel<<<1,1>>>(d_a, d_b, d_c);
}
void Addition::copyOutputToHost(){
cudaMemcpy(h_c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
}
void Addition::freeDeviceMemeory(){
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
}
|
cebcd30eb4e10b5fd317e78b3305eab97d44ce01.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel2_xdir;
int xdim0_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim0_advec_cell_kernel2_xdir;
int ydim0_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim1_advec_cell_kernel2_xdir;
int xdim1_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim1_advec_cell_kernel2_xdir;
int ydim1_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim2_advec_cell_kernel2_xdir;
int xdim2_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim2_advec_cell_kernel2_xdir;
int ydim2_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim3_advec_cell_kernel2_xdir;
int xdim3_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim3_advec_cell_kernel2_xdir;
int ydim3_advec_cell_kernel2_xdir_h = -1;
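// The OPS_ACCn macros flatten a relative (x,y,z) stencil offset into a 1-D index
// using the per-argument x/y extents held in constant memory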
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_xdir*(y)+xdim0_advec_cell_kernel2_xdir*ydim0_advec_cell_kernel2_xdir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_xdir*(y)+xdim1_advec_cell_kernel2_xdir*ydim1_advec_cell_kernel2_xdir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_xdir*(y)+xdim2_advec_cell_kernel2_xdir*ydim2_advec_cell_kernel2_xdir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_xdir*(y)+xdim3_advec_cell_kernel2_xdir*ydim3_advec_cell_kernel2_xdir*(z))
//user function
__device__
inline void advec_cell_kernel2_xdir( double *pre_vol, double *post_vol, const double *volume,
const double *vol_flux_x) {
pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)];
post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_cell_kernel2_xdir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel2_xdir + idx_z * 1 * xdim0_advec_cell_kernel2_xdir * ydim0_advec_cell_kernel2_xdir;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel2_xdir + idx_z * 1 * xdim1_advec_cell_kernel2_xdir * ydim1_advec_cell_kernel2_xdir;
arg2 += idx_x * 1 + idx_y * 1 * xdim2_advec_cell_kernel2_xdir + idx_z * 1 * xdim2_advec_cell_kernel2_xdir * ydim2_advec_cell_kernel2_xdir;
arg3 += idx_x * 1 + idx_y * 1 * xdim3_advec_cell_kernel2_xdir + idx_z * 1 * xdim3_advec_cell_kernel2_xdir * ydim3_advec_cell_kernel2_xdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel2_xdir(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_advec_cell_kernel2_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_arg args[4] = { arg0, arg1, arg2, arg3};
ops_timing_realloc(30,"advec_cell_kernel2_xdir");
OPS_kernels[30].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_advec_cell_kernel2_xdir_h || ydim0 != ydim0_advec_cell_kernel2_xdir_h || xdim1 != xdim1_advec_cell_kernel2_xdir_h || ydim1 != ydim1_advec_cell_kernel2_xdir_h || xdim2 != xdim2_advec_cell_kernel2_xdir_h || ydim2 != ydim2_advec_cell_kernel2_xdir_h || xdim3 != xdim3_advec_cell_kernel2_xdir_h || ydim3 != ydim3_advec_cell_kernel2_xdir_h) {
hipMemcpyToSymbol( xdim0_advec_cell_kernel2_xdir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel2_xdir_h = xdim0;
hipMemcpyToSymbol( ydim0_advec_cell_kernel2_xdir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel2_xdir_h = ydim0;
hipMemcpyToSymbol( xdim1_advec_cell_kernel2_xdir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel2_xdir_h = xdim1;
hipMemcpyToSymbol( ydim1_advec_cell_kernel2_xdir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel2_xdir_h = ydim1;
hipMemcpyToSymbol( xdim2_advec_cell_kernel2_xdir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel2_xdir_h = xdim2;
hipMemcpyToSymbol( ydim2_advec_cell_kernel2_xdir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel2_xdir_h = ydim2;
hipMemcpyToSymbol( xdim3_advec_cell_kernel2_xdir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel2_xdir_h = xdim3;
hipMemcpyToSymbol( ydim3_advec_cell_kernel2_xdir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel2_xdir_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
ops_timers_core(&c1,&t1);
OPS_kernels[30].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_cell_kernel2_xdir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[30].time += t2-t1;
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg3);
}
|
cebcd30eb4e10b5fd317e78b3305eab97d44ce01.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel2_xdir;
int xdim0_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim0_advec_cell_kernel2_xdir;
int ydim0_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim1_advec_cell_kernel2_xdir;
int xdim1_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim1_advec_cell_kernel2_xdir;
int ydim1_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim2_advec_cell_kernel2_xdir;
int xdim2_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim2_advec_cell_kernel2_xdir;
int ydim2_advec_cell_kernel2_xdir_h = -1;
__constant__ int xdim3_advec_cell_kernel2_xdir;
int xdim3_advec_cell_kernel2_xdir_h = -1;
__constant__ int ydim3_advec_cell_kernel2_xdir;
int ydim3_advec_cell_kernel2_xdir_h = -1;
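// The OPS_ACCn macros flatten a relative (x,y,z) stencil offset into a 1-D index
// using the per-argument x/y extents held in constant memory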
#define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_xdir*(y)+xdim0_advec_cell_kernel2_xdir*ydim0_advec_cell_kernel2_xdir*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_xdir*(y)+xdim1_advec_cell_kernel2_xdir*ydim1_advec_cell_kernel2_xdir*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_xdir*(y)+xdim2_advec_cell_kernel2_xdir*ydim2_advec_cell_kernel2_xdir*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_xdir*(y)+xdim3_advec_cell_kernel2_xdir*ydim3_advec_cell_kernel2_xdir*(z))
//user function
__device__
inline void advec_cell_kernel2_xdir( double *pre_vol, double *post_vol, const double *volume,
const double *vol_flux_x) {
pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_x[OPS_ACC3(1,0,0)] - vol_flux_x[OPS_ACC3(0,0,0)];
post_vol[OPS_ACC1(0,0,0)] = volume[OPS_ACC2(0,0,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
__global__ void ops_advec_cell_kernel2_xdir(
double* __restrict arg0,
double* __restrict arg1,
const double* __restrict arg2,
const double* __restrict arg3,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 + idx_y * 1 * xdim0_advec_cell_kernel2_xdir + idx_z * 1 * xdim0_advec_cell_kernel2_xdir * ydim0_advec_cell_kernel2_xdir;
arg1 += idx_x * 1 + idx_y * 1 * xdim1_advec_cell_kernel2_xdir + idx_z * 1 * xdim1_advec_cell_kernel2_xdir * ydim1_advec_cell_kernel2_xdir;
arg2 += idx_x * 1 + idx_y * 1 * xdim2_advec_cell_kernel2_xdir + idx_z * 1 * xdim2_advec_cell_kernel2_xdir * ydim2_advec_cell_kernel2_xdir;
arg3 += idx_x * 1 + idx_y * 1 * xdim3_advec_cell_kernel2_xdir + idx_z * 1 * xdim3_advec_cell_kernel2_xdir * ydim3_advec_cell_kernel2_xdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel2_xdir(arg0, arg1, arg2, arg3);
}
}
// host stub function
void ops_par_loop_advec_cell_kernel2_xdir(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) {
ops_arg args[4] = { arg0, arg1, arg2, arg3};
ops_timing_realloc(30,"advec_cell_kernel2_xdir");
OPS_kernels[30].count++;
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif //OPS_MPI
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0]*args[0].dat->dim;
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0]*args[1].dat->dim;
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0]*args[2].dat->dim;
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0]*args[3].dat->dim;
int ydim3 = args[3].dat->size[1];
//Timing
double t1,t2,c1,c2;
ops_timers_core(&c2,&t2);
if (xdim0 != xdim0_advec_cell_kernel2_xdir_h || ydim0 != ydim0_advec_cell_kernel2_xdir_h || xdim1 != xdim1_advec_cell_kernel2_xdir_h || ydim1 != ydim1_advec_cell_kernel2_xdir_h || xdim2 != xdim2_advec_cell_kernel2_xdir_h || ydim2 != ydim2_advec_cell_kernel2_xdir_h || xdim3 != xdim3_advec_cell_kernel2_xdir_h || ydim3 != ydim3_advec_cell_kernel2_xdir_h) {
cudaMemcpyToSymbol( xdim0_advec_cell_kernel2_xdir, &xdim0, sizeof(int) );
xdim0_advec_cell_kernel2_xdir_h = xdim0;
cudaMemcpyToSymbol( ydim0_advec_cell_kernel2_xdir, &ydim0, sizeof(int) );
ydim0_advec_cell_kernel2_xdir_h = ydim0;
cudaMemcpyToSymbol( xdim1_advec_cell_kernel2_xdir, &xdim1, sizeof(int) );
xdim1_advec_cell_kernel2_xdir_h = xdim1;
cudaMemcpyToSymbol( ydim1_advec_cell_kernel2_xdir, &ydim1, sizeof(int) );
ydim1_advec_cell_kernel2_xdir_h = ydim1;
cudaMemcpyToSymbol( xdim2_advec_cell_kernel2_xdir, &xdim2, sizeof(int) );
xdim2_advec_cell_kernel2_xdir_h = xdim2;
cudaMemcpyToSymbol( ydim2_advec_cell_kernel2_xdir, &ydim2, sizeof(int) );
ydim2_advec_cell_kernel2_xdir_h = ydim2;
cudaMemcpyToSymbol( xdim3_advec_cell_kernel2_xdir, &xdim3, sizeof(int) );
xdim3_advec_cell_kernel2_xdir_h = xdim3;
cudaMemcpyToSymbol( ydim3_advec_cell_kernel2_xdir, &ydim3, sizeof(int) );
ydim3_advec_cell_kernel2_xdir_h = ydim3;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
int dat3 = args[3].dat->elem_size;
char *p_a[4];
//set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d];
#endif //OPS_MPI
int base0 = dat0 * 1 *
(start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d];
#endif //OPS_MPI
int base1 = dat1 * 1 *
(start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d];
#endif //OPS_MPI
int base2 = dat2 * 1 *
(start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] - d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d];
#else //OPS_MPI
for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d];
#endif //OPS_MPI
int base3 = dat3 * 1 *
(start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2] - args[3].dat->base[2] - d_m[2]);
p_a[3] = (char *)args[3].data_d + base3;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args,4,range);
ops_timers_core(&c1,&t1);
OPS_kernels[30].mpi_time += t1-t2;
//call kernel wrapper function, passing in pointers to data
ops_advec_cell_kernel2_xdir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size);
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
}
ops_timers_core(&c2,&t2);
OPS_kernels[30].time += t2-t1;
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
//Update kernel record
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg0);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg1);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg2);
OPS_kernels[30].transfer += ops_compute_transfer(dim, range, &arg3);
}
|
c4b288d378bdb2fb5e6ae6fd2b46c1dc5068fdf5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int ydim0_update_halo_kernel1_t1;
int ydim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int ydim1_update_halo_kernel1_t1;
int ydim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int ydim2_update_halo_kernel1_t1;
int ydim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int ydim3_update_halo_kernel1_t1;
int ydim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int ydim4_update_halo_kernel1_t1;
int ydim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
__constant__ int ydim5_update_halo_kernel1_t1;
int ydim5_update_halo_kernel1_t1_h = -1;
__constant__ int xdim6_update_halo_kernel1_t1;
int xdim6_update_halo_kernel1_t1_h = -1;
__constant__ int ydim6_update_halo_kernel1_t1;
int ydim6_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_t1*(y)+xdim0_update_halo_kernel1_t1*ydim0_update_halo_kernel1_t1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_t1*(y)+xdim1_update_halo_kernel1_t1*ydim1_update_halo_kernel1_t1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_t1*(y)+xdim2_update_halo_kernel1_t1*ydim2_update_halo_kernel1_t1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_t1*(y)+xdim3_update_halo_kernel1_t1*ydim3_update_halo_kernel1_t1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_t1*(y)+xdim4_update_halo_kernel1_t1*ydim4_update_halo_kernel1_t1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_t1*(y)+xdim5_update_halo_kernel1_t1*ydim5_update_halo_kernel1_t1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_t1*(y)+xdim6_update_halo_kernel1_t1*ydim6_update_halo_kernel1_t1*(z))
//user function
__device__
inline void update_halo_kernel1_t1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
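// For every field enabled in the fields mask, copy the value from the cell one row below in y into this halo cell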
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,-1,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,-1,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,-1,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,-1,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,-1,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,-1,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,-1,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1 + idx_z * 1*1 * xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1 + idx_z * 1*1 * xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1 + idx_z * 1*1 * xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1 + idx_z * 1*1 * xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1 + idx_z * 1*1 * xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1 + idx_z * 1*1 * xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_t1 + idx_z * 1*1 * xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,15)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_t1");
OPS_kernels[15].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_t1_h || ydim0 != ydim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || ydim1 != ydim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || ydim2 != ydim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || ydim3 != ydim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || ydim4 != ydim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h || ydim5 != ydim5_update_halo_kernel1_t1_h || xdim6 != xdim6_update_halo_kernel1_t1_h || ydim6 != ydim6_update_halo_kernel1_t1_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_t1_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_t1_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_t1_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_t1_h = ydim1;
hipMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_t1_h = xdim2;
hipMemcpyToSymbol( ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_t1_h = ydim2;
hipMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_t1_h = xdim3;
hipMemcpyToSymbol( ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_t1_h = ydim3;
hipMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_t1_h = xdim4;
hipMemcpyToSymbol( ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_t1_h = ydim4;
hipMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_t1_h = xdim5;
hipMemcpyToSymbol( ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_t1_h = ydim5;
hipMemcpyToSymbol( xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_t1_h = xdim6;
hipMemcpyToSymbol( ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_t1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[15].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 15;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 15;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
|
c4b288d378bdb2fb5e6ae6fd2b46c1dc5068fdf5.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel1_t1;
int xdim0_update_halo_kernel1_t1_h = -1;
__constant__ int ydim0_update_halo_kernel1_t1;
int ydim0_update_halo_kernel1_t1_h = -1;
__constant__ int xdim1_update_halo_kernel1_t1;
int xdim1_update_halo_kernel1_t1_h = -1;
__constant__ int ydim1_update_halo_kernel1_t1;
int ydim1_update_halo_kernel1_t1_h = -1;
__constant__ int xdim2_update_halo_kernel1_t1;
int xdim2_update_halo_kernel1_t1_h = -1;
__constant__ int ydim2_update_halo_kernel1_t1;
int ydim2_update_halo_kernel1_t1_h = -1;
__constant__ int xdim3_update_halo_kernel1_t1;
int xdim3_update_halo_kernel1_t1_h = -1;
__constant__ int ydim3_update_halo_kernel1_t1;
int ydim3_update_halo_kernel1_t1_h = -1;
__constant__ int xdim4_update_halo_kernel1_t1;
int xdim4_update_halo_kernel1_t1_h = -1;
__constant__ int ydim4_update_halo_kernel1_t1;
int ydim4_update_halo_kernel1_t1_h = -1;
__constant__ int xdim5_update_halo_kernel1_t1;
int xdim5_update_halo_kernel1_t1_h = -1;
__constant__ int ydim5_update_halo_kernel1_t1;
int ydim5_update_halo_kernel1_t1_h = -1;
__constant__ int xdim6_update_halo_kernel1_t1;
int xdim6_update_halo_kernel1_t1_h = -1;
__constant__ int ydim6_update_halo_kernel1_t1;
int ydim6_update_halo_kernel1_t1_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel1_t1*(y)+xdim0_update_halo_kernel1_t1*ydim0_update_halo_kernel1_t1*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel1_t1*(y)+xdim1_update_halo_kernel1_t1*ydim1_update_halo_kernel1_t1*(z))
#define OPS_ACC2(x,y,z) (x+xdim2_update_halo_kernel1_t1*(y)+xdim2_update_halo_kernel1_t1*ydim2_update_halo_kernel1_t1*(z))
#define OPS_ACC3(x,y,z) (x+xdim3_update_halo_kernel1_t1*(y)+xdim3_update_halo_kernel1_t1*ydim3_update_halo_kernel1_t1*(z))
#define OPS_ACC4(x,y,z) (x+xdim4_update_halo_kernel1_t1*(y)+xdim4_update_halo_kernel1_t1*ydim4_update_halo_kernel1_t1*(z))
#define OPS_ACC5(x,y,z) (x+xdim5_update_halo_kernel1_t1*(y)+xdim5_update_halo_kernel1_t1*ydim5_update_halo_kernel1_t1*(z))
#define OPS_ACC6(x,y,z) (x+xdim6_update_halo_kernel1_t1*(y)+xdim6_update_halo_kernel1_t1*ydim6_update_halo_kernel1_t1*(z))
//user function
__device__
inline void update_halo_kernel1_t1_gpu(double *density0, double *density1,
double *energy0, double *energy1,
double *pressure, double *viscosity,
double *soundspeed , const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0[OPS_ACC0(0,0,0)] = density0[OPS_ACC0(0,-1,0)];
if(fields[FIELD_DENSITY1] == 1) density1[OPS_ACC1(0,0,0)] = density1[OPS_ACC1(0,-1,0)];
if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC2(0,0,0)] = energy0[OPS_ACC2(0,-1,0)];
if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC3(0,0,0)] = energy1[OPS_ACC3(0,-1,0)];
if(fields[FIELD_PRESSURE] == 1) pressure[OPS_ACC4(0,0,0)] = pressure[OPS_ACC4(0,-1,0)];
if(fields[FIELD_VISCOSITY] == 1) viscosity[OPS_ACC5(0,0,0)] = viscosity[OPS_ACC5(0,-1,0)];
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed[OPS_ACC6(0,0,0)] = soundspeed[OPS_ACC6(0,-1,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#undef OPS_ACC6
__global__ void ops_update_halo_kernel1_t1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1 + idx_z * 1*1 * xdim0_update_halo_kernel1_t1 * ydim0_update_halo_kernel1_t1;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1 + idx_z * 1*1 * xdim1_update_halo_kernel1_t1 * ydim1_update_halo_kernel1_t1;
arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1 + idx_z * 1*1 * xdim2_update_halo_kernel1_t1 * ydim2_update_halo_kernel1_t1;
arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1 + idx_z * 1*1 * xdim3_update_halo_kernel1_t1 * ydim3_update_halo_kernel1_t1;
arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1 + idx_z * 1*1 * xdim4_update_halo_kernel1_t1 * ydim4_update_halo_kernel1_t1;
arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1 + idx_z * 1*1 * xdim5_update_halo_kernel1_t1 * ydim5_update_halo_kernel1_t1;
arg6 += idx_x * 1*1 + idx_y * 1*1 * xdim6_update_halo_kernel1_t1 + idx_z * 1*1 * xdim6_update_halo_kernel1_t1 * ydim6_update_halo_kernel1_t1;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3,
arg4, arg5, arg6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_t1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,15)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_t1");
OPS_kernels[15].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel1_t1_h || ydim0 != ydim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || ydim1 != ydim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || ydim2 != ydim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || ydim3 != ydim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || ydim4 != ydim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h || ydim5 != ydim5_update_halo_kernel1_t1_h || xdim6 != xdim6_update_halo_kernel1_t1_h || ydim6 != ydim6_update_halo_kernel1_t1_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) );
xdim0_update_halo_kernel1_t1_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel1_t1, &ydim0, sizeof(int) );
ydim0_update_halo_kernel1_t1_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) );
xdim1_update_halo_kernel1_t1_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel1_t1, &ydim1, sizeof(int) );
ydim1_update_halo_kernel1_t1_h = ydim1;
cudaMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) );
xdim2_update_halo_kernel1_t1_h = xdim2;
cudaMemcpyToSymbol( ydim2_update_halo_kernel1_t1, &ydim2, sizeof(int) );
ydim2_update_halo_kernel1_t1_h = ydim2;
cudaMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) );
xdim3_update_halo_kernel1_t1_h = xdim3;
cudaMemcpyToSymbol( ydim3_update_halo_kernel1_t1, &ydim3, sizeof(int) );
ydim3_update_halo_kernel1_t1_h = ydim3;
cudaMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) );
xdim4_update_halo_kernel1_t1_h = xdim4;
cudaMemcpyToSymbol( ydim4_update_halo_kernel1_t1, &ydim4, sizeof(int) );
ydim4_update_halo_kernel1_t1_h = ydim4;
cudaMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) );
xdim5_update_halo_kernel1_t1_h = xdim5;
cudaMemcpyToSymbol( ydim5_update_halo_kernel1_t1, &ydim5, sizeof(int) );
ydim5_update_halo_kernel1_t1_h = ydim5;
cudaMemcpyToSymbol( xdim6_update_halo_kernel1_t1, &xdim6, sizeof(int) );
xdim6_update_halo_kernel1_t1_h = xdim6;
cudaMemcpyToSymbol( ydim6_update_halo_kernel1_t1, &ydim6, sizeof(int) );
ydim6_update_halo_kernel1_t1_h = ydim6;
}
int *arg7h = (int *)arg7.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_t1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[15].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[15].mpi_time += t2-t1;
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[15].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 15;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 15;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_t1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(15,"update_halo_kernel1_t1");
}
ops_enqueue_kernel(desc);
}
#endif
|
3e5bf38e1e5dac02b31afe88059de1b49179d403.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <helper_cuda.h>
#include <rocblas.h>
#define BLOCK_SIZE 32
//Sequential version: One thread does it all. AKA--> Launch configuration <<<1,1>>>
__global__
void gpu1(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
for (int i = 0; i < m; i++){
for (int j = 0; j < n; j++){
d_C[i*(n) + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*(n) + j] += d_A[i*(k) + rc] * d_B[rc*(n) + j];
}
}
}
}
//Naive version: One thread per element in C
__global__
void gpu2( int m, int n, int k, double* d_A, double* d_B, double* d_C) {
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*blockDim.x+threadIdx.x; //row thread id
if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//one thread every 2 elements of C
__global__
void gpu3(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*2*blockDim.x+threadIdx.x; //row thread id
int j2 = j + blockDim.x;
if (j<m && i<n && j2 < m) {
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//one thread every 4 elements of C
__global__
void gpu4(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*4*blockDim.x+threadIdx.x; //row thread id
int j2 = j + blockDim.x;
int j3 = j2 + blockDim.x;
int j4 = j3 + blockDim.x;
if (j<m && i<n && j2 < m && j3 < m && j4 < m) {
d_C[i*n + j4] = 0.0;
d_C[i*n + j3] = 0.0;
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j4] += d_A[i*k + rc] * d_B[rc*n + j4];
d_C[i*n + j3] += d_A[i*k + rc] * d_B[rc*n + j3];
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n && j3 < m) {
d_C[i*n + j3] = 0.0;
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j3] += d_A[i*k + rc] * d_B[rc*n + j3];
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n && j2 < m) {
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//blocked version with shared memory
__global__
void gpu5(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
__shared__ double A[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double B[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double C[BLOCK_SIZE][BLOCK_SIZE];
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*blockDim.x+threadIdx.x; //row thread id
C[threadIdx.x][threadIdx.y] = 0.0;
for (int rc = 0; rc < k; rc+=BLOCK_SIZE){
A[threadIdx.y][threadIdx.x] = d_A[i*k + rc + threadIdx.x];
B[threadIdx.y][threadIdx.x] = d_B[(rc + threadIdx.y)*n + j];
__syncthreads();
for (int rc2 = 0; rc2 < BLOCK_SIZE; rc2++){
C[threadIdx.y][threadIdx.x] += A[threadIdx.y][rc2] * B[rc2][threadIdx.x];
}
__syncthreads();
}
d_C[i*n + j] = C[threadIdx.y][threadIdx.x];
}
extern "C" {
#include <cblas.h>
void matmult_lib(int m, int n, int k,double* A, double* B, double* C){
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A, k, B, n, 0.0, C, n);
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, n, m, k, 1.0, B, n, A, k, 0.0, C, n);
}
void matmult_gpulib(int m, int n, int k,double* A, double* B, double* C){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, B, k*n*sizeof(double), hipMemcpyHostToDevice);
// Do the actual multiplication
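	// cuBLAS GEMM is column-major, so the operands are passed swapped (B first, A second):
	// the call computes (A*B)^T in column-major storage, which is byte-for-byte the row-major C = A*B.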
hipblasDgemm('n', 'n', n, m, k, 1.0, d_B, n, d_A, k, 0.0, d_C, n);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
void matmult_gpu5(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, h_A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k*n*sizeof(double), hipMemcpyHostToDevice);
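	// Note: gpu5 performs no bounds checking, so this launch assumes m, n and k are all multiples of BLOCK_SIZE (32).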
dim3 NUM_BLOCKS = dim3(m/32, n/32, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
// Kernel launch
hipLaunchKernelGGL(( gpu5), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(h_C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
void matmult_gpu4(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, h_A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k*n*sizeof(double), hipMemcpyHostToDevice);
int blockx = 0, blocky = 0;
if(m%128)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/128 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n/4 <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
blockx = 0;
if (m%4)
blockx = 1;
NUM_THREADS = dim3(m/4 + blockx, n, 1);
}
// Kernel launch
hipLaunchKernelGGL(( gpu4), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(h_C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
void matmult_gpu3(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, h_A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k*n*sizeof(double), hipMemcpyHostToDevice);
int blockx = 0, blocky = 0;
if(m%64)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/64 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n/2 <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
NUM_THREADS = dim3(m/2 + m % 2, n, 1);
}
// Kernel launch
hipLaunchKernelGGL(( gpu3), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(h_C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
void matmult_gpu2(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, h_A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k*n*sizeof(double), hipMemcpyHostToDevice);
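	// Round the grid up: add one extra block in x/y when m or n is not a multiple of 32;
	// for small problems (m*n <= 1024 threads) a single block of exactly m x n threads is used instead.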
int blockx = 0, blocky = 0;
if(m%32)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/32 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
NUM_THREADS = dim3(m, n, 1);
}
// Kernel launch
hipLaunchKernelGGL(( gpu2), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(h_C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
void matmult_gpu1(int m, int n, int k, double* h_A, double* h_B, double* h_C ){
double* d_A; hipMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; hipMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; hipMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
hipMemcpy(d_A, h_A, m*k*sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, k*n*sizeof(double), hipMemcpyHostToDevice);
// Kernel launch
hipLaunchKernelGGL(( gpu1), dim3(1),dim3(1), 0, 0, m, n, k, d_A, d_B, d_C);
checkCudaErrors(hipDeviceSynchronize());
// Transfer results from device to host
hipMemcpy(h_C, d_C, m*n*sizeof(double), hipMemcpyDeviceToHost);
// Free memory
hipFree(d_C);
hipFree(d_A);
hipFree(d_B);
}
} // end extern C
|
3e5bf38e1e5dac02b31afe88059de1b49179d403.cu
|
#include <stdio.h>
#include <helper_cuda.h>
#include <cublas.h>
#define BLOCK_SIZE 32
//Sequential version: One thread does it all. AKA--> Launch configuration <<<1,1>>>
__global__
void gpu1(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
for (int i = 0; i < m; i++){
for (int j = 0; j < n; j++){
d_C[i*(n) + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*(n) + j] += d_A[i*(k) + rc] * d_B[rc*(n) + j];
}
}
}
}
//Naive version: One thread per element in C
__global__
void gpu2( int m, int n, int k, double* d_A, double* d_B, double* d_C) {
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*blockDim.x+threadIdx.x; //row thread id
if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//one thread every 2 elements of C
__global__
void gpu3(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*2*blockDim.x+threadIdx.x; //row thread id
int j2 = j + blockDim.x;
if (j<m && i<n && j2 < m) {
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//one thread every 4 elements of C
__global__
void gpu4(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*4*blockDim.x+threadIdx.x; //row thread id
int j2 = j + blockDim.x;
int j3 = j2 + blockDim.x;
int j4 = j3 + blockDim.x;
if (j<m && i<n && j2 < m && j3 < m && j4 < m) {
d_C[i*n + j4] = 0.0;
d_C[i*n + j3] = 0.0;
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j4] += d_A[i*k + rc] * d_B[rc*n + j4];
d_C[i*n + j3] += d_A[i*k + rc] * d_B[rc*n + j3];
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n && j3 < m) {
d_C[i*n + j3] = 0.0;
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j3] += d_A[i*k + rc] * d_B[rc*n + j3];
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n && j2 < m) {
d_C[i*n + j2] = 0.0;
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j2] += d_A[i*k + rc] * d_B[rc*n + j2];
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
} else if (j<m && i<n) {
d_C[i*n + j] = 0.0;
for (int rc = 0; rc < k; rc++){
d_C[i*n + j] += d_A[i*k + rc] * d_B[rc*n + j];
}
}
}
//blocked version with shared memory
__global__
void gpu5(int m, int n, int k, double* d_A, double* d_B, double* d_C ){
__shared__ double A[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double B[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double C[BLOCK_SIZE][BLOCK_SIZE];
  int i = blockIdx.y*blockDim.y+threadIdx.y; //column thread id
int j = blockIdx.x*blockDim.x+threadIdx.x; //row thread id
C[threadIdx.x][threadIdx.y] = 0.0;
for (int rc = 0; rc < k; rc+=BLOCK_SIZE){
A[threadIdx.y][threadIdx.x] = d_A[i*k + rc + threadIdx.x];
B[threadIdx.y][threadIdx.x] = d_B[(rc + threadIdx.y)*n + j];
__syncthreads();
for (int rc2 = 0; rc2 < BLOCK_SIZE; rc2++){
C[threadIdx.y][threadIdx.x] += A[threadIdx.y][rc2] * B[rc2][threadIdx.x];
}
__syncthreads();
}
d_C[i*n + j] = C[threadIdx.y][threadIdx.x];
}
extern "C" {
#include <cblas.h>
void matmult_lib(int m, int n, int k,double* A, double* B, double* C){
cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, m, n, k, 1.0, A, k, B, n, 0.0, C, n);
//cblas_dgemm(CblasColMajor, CblasNoTrans, CblasNoTrans, n, m, k, 1.0, B, n, A, k, 0.0, C, n);
}
void matmult_gpulib(int m, int n, int k,double* A, double* B, double* C){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, k*n*sizeof(double), cudaMemcpyHostToDevice);
// Do the actual multiplication
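	// cuBLAS GEMM is column-major, so the operands are passed swapped (B first, A second):
	// the call computes (A*B)^T in column-major storage, which is byte-for-byte the row-major C = A*B.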
cublasDgemm('n', 'n', n, m, k, 1.0, d_B, n, d_A, k, 0.0, d_C, n);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
void matmult_gpu5(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, h_A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k*n*sizeof(double), cudaMemcpyHostToDevice);
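	// Note: gpu5 performs no bounds checking, so this launch assumes m, n and k are all multiples of BLOCK_SIZE (32).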
dim3 NUM_BLOCKS = dim3(m/32, n/32, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
// Kernel launch
gpu5<<<NUM_BLOCKS, NUM_THREADS>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(h_C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
void matmult_gpu4(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, h_A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k*n*sizeof(double), cudaMemcpyHostToDevice);
int blockx = 0, blocky = 0;
if(m%128)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/128 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n/4 <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
blockx = 0;
if (m%4)
blockx = 1;
NUM_THREADS = dim3(m/4 + blockx, n, 1);
}
// Kernel launch
gpu4<<<NUM_BLOCKS, NUM_THREADS>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(h_C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
void matmult_gpu3(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, h_A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k*n*sizeof(double), cudaMemcpyHostToDevice);
int blockx = 0, blocky = 0;
if(m%64)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/64 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n/2 <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
NUM_THREADS = dim3(m/2 + m % 2, n, 1);
}
// Kernel launch
gpu3<<<NUM_BLOCKS, NUM_THREADS>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(h_C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
void matmult_gpu2(int m, int n, int k, double* h_A, double* h_B, double* h_C){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, h_A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k*n*sizeof(double), cudaMemcpyHostToDevice);
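	// Round the grid up: add one extra block in x/y when m or n is not a multiple of 32;
	// for small problems (m*n <= 1024 threads) a single block of exactly m x n threads is used instead.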
int blockx = 0, blocky = 0;
if(m%32)
blockx = 1;
if (n%32)
blocky = 1;
dim3 NUM_BLOCKS = dim3(m/32 + blockx, n/32 + blocky, 1);
dim3 NUM_THREADS = dim3(32, 32, 1);
if (m*n <= 1024){
NUM_BLOCKS = dim3(1, 1, 1);
NUM_THREADS = dim3(m, n, 1);
}
// Kernel launch
gpu2<<<NUM_BLOCKS, NUM_THREADS>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(h_C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
void matmult_gpu1(int m, int n, int k, double* h_A, double* h_B, double* h_C ){
double* d_A; cudaMalloc((void**)&d_A, m*k*sizeof(double));
double* d_B; cudaMalloc((void**)&d_B, k*n*sizeof(double));
double* d_C; cudaMalloc((void**)&d_C, m*n*sizeof(double));
// Transfer data from host to device
cudaMemcpy(d_A, h_A, m*k*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, k*n*sizeof(double), cudaMemcpyHostToDevice);
// Kernel launch
gpu1<<<1,1>>>(m, n, k, d_A, d_B, d_C);
checkCudaErrors(cudaDeviceSynchronize());
// Transfer results from device to host
cudaMemcpy(h_C, d_C, m*n*sizeof(double), cudaMemcpyDeviceToHost);
// Free memory
cudaFree(d_C);
cudaFree(d_A);
cudaFree(d_B);
}
} // end extern C
|
55d2b2c9c3a2031f8fe362236260d8bbcd0994a3.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include "CStructs.h"
extern "C" void launchCuda(int* a, int* b, int*c, int n);
extern "C" void launchCudaMatrix(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2);
//extern "C" void launchCudaMatrix(size_t numLoops, int** myMatrix1, int** myMatrix2);
extern "C" void launchCudaPitch(int width, int height, size_t pitch, int* d_tab);
extern "C" void launchCudaMatVecMult(double *d_constMatrix, double* d_varArray,double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz);
extern "C" void launchCudaPVecReduce(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz, double*outArray, bool shared = true);
// kernel: per-layer matrix-vector multiply (the constant ny-by-nz matrix applied to each of the nx layer vectors)
__global__ void pMatVecMult(double *cM, double *varA, double *gatherA, int nx, int ny, int nz)
{
unsigned layer = blockIdx.x*blockDim.x + threadIdx.x;
unsigned row = blockIdx.y*blockDim.y + threadIdx.y;
double tempSum = 0.;
if ((layer < nx) &&(row < ny)) {
for (size_t col = 0; col < nz; ++col) {
tempSum += cM[row*nz + col] * varA[layer*nz + col];
}
gatherA[layer*ny+ row] = tempSum;
}
}
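// Pairwise tree reduction over the nx layers in global memory: each step halves the number of active
// layers, summing ny-length blocks in place. __syncthreads() only synchronizes within a block, so the
// reduction is only safe for a single-block launch; the first thread then writes the final ny sums to out.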
__global__ void pVecReducedGlobal(double *gatherA, int nx, int ny, int n, double* out)
{
size_t t_id = ny * (threadIdx.x + blockDim.x*blockIdx.x );
for(size_t s = nx / 2; s > 0; s >>= 1) {
if (t_id < s*ny) {
for (size_t row = 0; row < ny; ++row) {
gatherA[t_id+row] += gatherA[t_id + s*ny+row];
}
}
__syncthreads();
}
if (t_id == 0) {
for (size_t row = 0; row < ny; ++row) {
out[row] = gatherA[t_id+row];
}
}
}
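// Same pairwise reduction, but staged through dynamically sized shared memory (the caller passes
// nx*ny*sizeof(double) as the third launch parameter); each thread copies its own ny-length block and
// the matching block from the upper half into sdata before reducing.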
__global__ void pVecReduceShared(double *gatherA, int nx, int ny, int n, double* out)
{
extern double __shared__ sdata[];
size_t t_id = ny * (threadIdx.x + blockDim.x*blockIdx.x);
for (size_t row = 0; row < ny; ++row) {
sdata[t_id+row] = gatherA[t_id+row];
sdata[t_id + nx*ny / 2+row] = gatherA[t_id + nx*ny / 2+row];
}
__syncthreads();
for (size_t s = nx / 2; s > 0; s >>= 1) {
if (t_id < s*ny) {
for (size_t row = 0; row < ny; ++row) {
sdata[t_id + row] += sdata[t_id + s * ny + row];
}
}
__syncthreads();
}
if (t_id == 0) {
for (size_t row = 0; row < ny; ++row) {
out[row] = sdata[t_id + row];
}
}
}
void launchCudaMatVecMult(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz) {
pMatVecMult << <gridSize, blockSize >> > (d_constMatrix, d_varArray, d_gatherArray, nx, ny, nz);
hipDeviceSynchronize();
}
void launchCudaPVecReduce(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz, double*outArray,bool shared) {
if (shared) {
pVecReduceShared << <gridSize, blockSize, nx*ny*sizeof(double) >> > (d_gatherArray, nx, ny, nz, outArray);
}
else
{
pVecReducedGlobal << <gridSize, blockSize >> > (d_gatherArray, nx, ny, nz, outArray);
}
hipDeviceSynchronize();
}
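// Fills a pitched 2D allocation with the value 9, using the row pitch (in bytes) to compute each row's base address.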
__global__ void pitchEx2D_1(int* tab, int width, int height, size_t pitch) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
if (row < width && col < height) {
*(((int *)(((char *)tab) + (row * pitch))) + col) = 9;
}
}
void launchCudaPitch(int width, int height, size_t pitch, int* d_tab) {
dim3 grid(width, height);
dim3 block(width, height);
pitchEx2D_1 << <grid, block >> > (d_tab, width, height, pitch);
hipDeviceSynchronize();
}
//Sample CUDA function
__global__ void vectorAdd(int* a, int* b, int*c, int n) {
int i = threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
void launchCuda(int* a, int* b, int*c, int n) {
hipLaunchKernelGGL(( vectorAdd) , dim3(1), dim3(n) , 0, 0, a, b, c, n);
hipDeviceSynchronize();
}
__global__ void simpleMatrixAdd(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2,size_t numRows, size_t numCols) {
// size_t numRows = myMatrix1[0]->numRows; //Not sure if this was important, since you were not setting numRows or numCols
// size_t numCols = myMatrix1[0]->numCols;
int loops = threadIdx.x;
myMatrix1[1]->matrix[2] = 200.;
if (loops < numLoops) {
for (size_t row = 0; row < numRows; ++row) {
for (size_t col = 0; col <numCols; ++col) {
myMatrix1[loops]->matrix[row*numCols + col] = myMatrix1[loops]->matrix[row*numCols + col] +
myMatrix2[loops]->matrix[row*numCols + col];
}
}
}
myMatrix1[1]->matrix[3] = 200.;
}
void launchCudaMatrix(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2) {
myMatrix1[1]->matrix[0] = 100.;
size_t numIters = 256;
size_t blocks = numLoops / numIters;
printf("\nCalling kernel with numLoops = %d",numLoops);
simpleMatrixAdd << < blocks , numIters >> > (numLoops, myMatrix1, myMatrix2, myMatrix1[0]->numRows, myMatrix1[0]->numCols);
hipDeviceSynchronize();
myMatrix1[1]->matrix[1] = 100.;
}
void matVecMultC(double** weights, double* inVector, int numRows, int numCols, double* p1) {
for (int rowCount = 0; rowCount < numRows; ++rowCount) {
double tempSum = 0;
for (int colCount = 0; colCount < numCols; ++colCount) {
tempSum += weights[rowCount][colCount] * inVector[colCount];
}
p1[rowCount] = tempSum;
}
}
void plusEqualsStruct(CNNStructureC *holdAccumGradients1, CNNStructureC *holdAccumGradients2) {
for (size_t numLayer = 0; numLayer < holdAccumGradients1->weights.numLayers; ++numLayer) {
for (size_t numRow = 0; numRow < holdAccumGradients1->weights.numRows[numLayer]; ++numRow) {
for (size_t numCol = 0; numCol < holdAccumGradients1->weights.numCols[numLayer]; ++numCol) {
holdAccumGradients1->weights.values[numLayer][numRow][numCol] +=
holdAccumGradients2->weights.values[numLayer][numRow][numCol];
}
}
}
};
void setWeightsToZeros(CNNStructureC *input) {
//Assumes input already has memory created.
for (int layer = 0; layer < input->weights.numLayers - 1; ++layer) {
for (int row = 0; row < input->weights.numRows[layer]; ++row) {
for (int col = 0; col < input->weights.numCols[layer]; ++col) {
input->weights.values[layer][row][col] = 0;
}
}
}
};
void plusEqualsStruct(CNNStructureCFlat *holdAccumGradients1, CNNStructureCFlat *holdAccumGradients2) {
size_t indexFlatten;
for (size_t numLayer = 0; numLayer < holdAccumGradients1->weights.numLayers; ++numLayer) {
for (size_t numRow = 0; numRow < holdAccumGradients1->weights.numRows[numLayer]; ++numRow) {
for (size_t numCol = 0; numCol < holdAccumGradients1->weights.numCols[numLayer]; ++numCol) {
indexFlatten = holdAccumGradients1->weights.startLayer[numLayer] +
numRow * holdAccumGradients1->weights.numCols[numLayer] + numCol;
holdAccumGradients1->weights.values[indexFlatten] +=
holdAccumGradients2->weights.values[indexFlatten];
}
}
}
};
void setWeightsToZeros(CNNStructureCFlat *input) {
//Assumes input already has memory created.
for (int layer = 0; layer < input->weights.numLayers - 1; ++layer) {
for (int row = 0; row < input->weights.numRows[layer]; ++row) {
for (int col = 0; col < input->weights.numCols[layer]; ++col) {
input->weights.values[input->weights.startLayer[layer] + row * input->weights.numCols[layer] + col] = 0;
}
}
}
};
void structMatVecMult(CNNStructureC* inStruct, reducedLayerNodesC* nodesForUpdating, double* inVector, int numLayer, int whichCase) {
int numCols = inStruct->weights.numCols[numLayer];
int numRows = inStruct->weights.numRows[numLayer];
for (int rowCount = 0; rowCount < numRows; ++rowCount) {
double tempSum = 0;
for (int colCount = 0; colCount < numCols; ++colCount) {
tempSum += inStruct->weights.values[numLayer][rowCount][colCount] * inVector[colCount];
}
//Sigma function
if (tempSum < 0) {
tempSum = 0; //Comment this if you want to kill it.
}
nodesForUpdating->nodes[whichCase][numLayer][rowCount] = tempSum; //Assumes numLayer starts at 0.
// inStruct->layerNodes.values[numLayer + 1][rowCount] = tempSum;
}
};
void updateLayersC(CNNStructureC * testStruct, dataC* data, reducedLayerNodesC* nodesForUpdating, int whichCase) {
//input contains the starting layer nodes.
// testStruct->layerNodes.values[0] = data->inputNodes[whichCase];
double* tempLayer;
tempLayer = data->inputNodes[whichCase];
for (int layerCount = 0; layerCount < testStruct->weights.numLayers; ++layerCount) {
structMatVecMult(testStruct, nodesForUpdating, tempLayer, layerCount, whichCase);
tempLayer = nodesForUpdating->nodes[whichCase][layerCount];
// tempLayer = testStruct->layerNodes.values[layerCount + 1];
}
}
double calcCostC(CNNStructureC* inputStruct, dataC* inputData, reducedLayerNodesC* nodesForUpdating, int whichCase, bool updateLayersBool) {
//This version uses the input to update the layers and then calculate the cost.
if (updateLayersBool) {
updateLayersC(inputStruct, inputData, nodesForUpdating, whichCase);
}
//The cost only depends on the last layer's nodes. And there is no addition term added to the host_vector.
double costSum = 0.;
size_t numLayers = nodesForUpdating->numLayers;
// size_t numLayers = inputStruct->layerNodes.numLayers;
size_t desiredSize = inputData->numOutputs;
for (int iCnt = 0; iCnt < desiredSize - 1; ++iCnt) { // Cut down by one because desired has the extra 1.
// It doesn't really matter since both have 1 at the end.
costSum += pow((nodesForUpdating->nodes[whichCase][numLayers-1][iCnt] - inputData->outputNodes[whichCase][iCnt]), 2);
// costSum += pow((inputStruct->layerNodes.values[numLayers - 1][iCnt] - inputData->outputNodes[whichCase][iCnt]), 2);
}
return(costSum);
}
void makeGradPassC(CNNStructureC* testStruct, CNNStructureC* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase);
void makeGradPassCUDA(CNNStructureC* testStruct, CNNStructureCFlat* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase);
extern "C" void calcGradientPartsC(CNNStructureC holdAccumGradients[], structureC<int> *testCase, dataC* data,
CNNStructureC *testStruct, size_t begin, double* cost, reducedLayerNodesC* nodesForUpdating,
const size_t numBlocks, const size_t numThreads) {
size_t end = numBlocks * numThreads;
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
setWeightsToZeros(&holdAccumGradients[iCnt]); // Zero this out because of the += later.
}
double tempCost = 0;
size_t tSet;
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
updateLayersC(testStruct, data, nodesForUpdating, tSet);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
tempCost += calcCostC(testStruct, data, nodesForUpdating, tSet, false);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
// Add to holdAccumGradients for this test set.
makeGradPassC(testStruct, &holdAccumGradients[iCnt], nodesForUpdating, data, tSet);
}
}
//For the moment, just gather them up, here.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
plusEqualsStruct(&holdAccumGradients[0], &holdAccumGradients[iCnt]);
}
*cost = tempCost / double(end - begin);
printf("\nCost in calcGradientPartsC ");
printf("\t%f", *cost);
}
extern "C" void calcGradientPartsCUDA(CNNStructureCFlat holdAccumGradients[], structureC<int> *testCase, dataC* data,
CNNStructureC *testStruct, size_t begin, double* cost, reducedLayerNodesC* nodesForUpdating,
const size_t numBlocks, const size_t numThreads) {
size_t end = numBlocks * numThreads;
// Set up (zero out) numThreads number of holdAccumGradients. Each will be added to as you work towards
// the average over all the training sets.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
setWeightsToZeros(&holdAccumGradients[iCnt]); // Zero this out because of the += later.
}
// launchCUDAGradientsZero(d_holdAccumGradientsCFlatWeights);
double tempCost = 0;
size_t tSet;
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
updateLayersC(testStruct, data, nodesForUpdating, tSet);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
tempCost += calcCostC(testStruct, data, nodesForUpdating, tSet, false);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
// Add to holdAccumGradients for this test set.
makeGradPassCUDA(testStruct, &holdAccumGradients[iCnt], nodesForUpdating, data, tSet);
}
}
//For the moment, just gather them up, here.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
plusEqualsStruct(&holdAccumGradients[0], &holdAccumGradients[iCnt]);
}
*cost = tempCost / double(end - begin);
printf("\nCost in calcGradientPartsCUDA ");
printf("\t%f", *cost);
}
void makeGradPassC(CNNStructureC* testStruct, CNNStructureC* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase){
// The goal here is to create the gradient for the single test case.
// There are multiple terms that need to be multiplied
// together to form each element. Complete a layer (from back to front)
	// before proceeding to the next layer. The reason is that you need the results of layer L
	// in order to get the cost terms for layer L-1.
	/*Memory consideration: I have three vectors (pCpA, partRelu, and temppCpA) that will take on varying sizes
	as I go through the layers. Rather than resizing the vectors, I'm just going to allocate the memory for the largest
	size that they will take on. Note that I could define these once outside of here. However, at this point in my understanding,
	I think that would introduce a problem when going highly parallel, i.e., each thread should have its own copy.
	A likely more efficient way would be to make an array of each of these, then send one of each into this function. Ideally,
	you could leave them in place (not have to move them on, off, or create them on the device each time you run this).*/
/*In going parallel, I now get the first layer nodes from the data input nodes. Then, the rest of the nodes (hidden layers and
output nodes) come from the reducedForUpdate nodes. */
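	/*With the ReLU used in structMatVecMult, the terms accumulated below are the usual chain-rule factors:
		dC/da[L][i]    = 2*(a[L][i] - desired[i])                      (squared-error cost starts the recursion)
		dC/dw[l][r][c] = a[l-1][c] * relu'(z[l][r]) * dC/da[l][r]      (the bias column uses 1 in place of a[l-1][c])
		dC/da[l-1][c]  = sum over r of w[l][r][c] * relu'(z[l][r]) * dC/da[l][r]
	Here pCpA holds dC/da for the current layer and partRelu holds relu'(z), i.e. 0 or 1.*/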
//Find the largest size vector (Note, that is a number that could be precalculated and sent in).
size_t maxNodes = 0;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numLayers; ++iCnt) {
if (testStruct->layerNodes.numNodes[iCnt] > maxNodes) {
maxNodes = testStruct->layerNodes.numNodes[iCnt];
}
}
double* pCpA = (double*)malloc(maxNodes* sizeof(double));
double* partRelu = (double*)malloc(sizeof(double)*maxNodes);
double* temppCpA = (double*)malloc(sizeof(double)*maxNodes);
size_t startLayer = testStruct->layerNodes.numLayers - 1;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numNodes[startLayer]; ++iCnt) {
pCpA[iCnt] = 2.*(nodesForUpdating->nodes[whichCase][startLayer-1][iCnt] - data->outputNodes[whichCase][iCnt]);
// pCpA[iCnt] = 2.*(testStruct->layerNodes.values[startLayer][iCnt] - desired[iCnt]);
}
for (size_t layerCount = startLayer; layerCount > 0; --layerCount) {
if (layerCount == 1) {
matVecMultC(testStruct->weights.values[layerCount - 1],
data->inputNodes[whichCase], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
else
{
matVecMultC(testStruct->weights.values[layerCount - 1],
nodesForUpdating->nodes[whichCase][layerCount - 2], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
//The implication of these next lines would seem to be that you do not use the final layer in calculating partRelu.
//I have carried that into the above.
// matVecMultC(testStruct->weights.values[layerCount - 1],
// testStruct->layerNodes.values[layerCount - 1], testStruct->weights.numRows[layerCount - 1],
// testStruct->weights.numCols[layerCount - 1], partRelu);
//Sigma
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
if (partRelu[rowCount] < 0.) {
partRelu[rowCount] = 0.;
}
else {
partRelu[rowCount] = 1.;
}
// partRelu[rowCount] = 1.; //uncomment here and comment above to Kill sigma till you understand it.
for (size_t colCount = 0; colCount < tempGradStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
//(partial z wrt w)*partial relu*pCpA
if (layerCount == 1) {
tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
data->inputNodes[whichCase][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
else {
tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
nodesForUpdating->nodes[whichCase][layerCount - 2][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
// testStruct->layerNodes.values[layerCount - 1][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// Each row also has a bias term at the end of the row.
tempGradStruct->weights.values[layerCount - 1][rowCount][testStruct->weights.numCols[layerCount - 1] - 1] +=
partRelu[rowCount] * pCpA[rowCount];
}
if (layerCount > 1) {
//Calculate the pCpA host_vector for the next round.
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
double tempSum = 0.;
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
tempSum += testStruct->weights.values[layerCount - 1][rowCount][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
temppCpA[colCount] = tempSum;
}
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
pCpA[colCount] = temppCpA[colCount];
}
}
}
free(pCpA);
free(partRelu);
free(temppCpA);
}
void makeGradPassCUDA(CNNStructureC* testStruct, CNNStructureCFlat* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase) {
// The goal here is to create the gradient for the single test case.
// There are multiple terms that need to be multiplied
// together to form each element. Complete a layer (from back to front)
	// before proceeding to the next layer. The reason is that you need the results of layer L
	// in order to get the cost terms for layer L-1.
	/*Memory consideration: I have three vectors (pCpA, partRelu, and temppCpA) that will take on varying sizes
	as I go through the layers. Rather than resizing the vectors, I'm just going to allocate the memory for the largest
	size that they will take on. Note that I could define these once outside of here. However, at this point in my understanding,
	I think that would introduce a problem when going highly parallel, i.e., each thread should have its own copy.
	A likely more efficient way would be to make an array of each of these, then send one of each into this function. Ideally,
	you could leave them in place (not have to move them on, off, or create them on the device each time you run this).*/
/*In going parallel, I now get the first layer nodes from the data input nodes. Then, the rest of the nodes (hidden layers and
output nodes) come from the reducedForUpdate nodes. */
//Find the largest size vector (Note, that is a number that could be precalculated and sent in).
size_t indexFlatten;
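	// Flattened weight indexing: element (layer l, row r, col c) lives at
	// startLayer[l] + r * numCols[l] + c, i.e. row-major within each layer.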
size_t maxNodes = 0;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numLayers; ++iCnt) {
if (testStruct->layerNodes.numNodes[iCnt] > maxNodes) {
maxNodes = testStruct->layerNodes.numNodes[iCnt];
}
}
double* pCpA = (double*)malloc(maxNodes * sizeof(double));
double* partRelu = (double*)malloc(sizeof(double)*maxNodes);
double* temppCpA = (double*)malloc(sizeof(double)*maxNodes);
size_t startLayer = testStruct->layerNodes.numLayers - 1;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numNodes[startLayer]; ++iCnt) {
pCpA[iCnt] = 2.*(nodesForUpdating->nodes[whichCase][startLayer - 1][iCnt] - data->outputNodes[whichCase][iCnt]);
// pCpA[iCnt] = 2.*(testStruct->layerNodes.values[startLayer][iCnt] - desired[iCnt]);
}
for (size_t layerCount = startLayer; layerCount > 0; --layerCount) {
if (layerCount == 1) {
matVecMultC(testStruct->weights.values[layerCount - 1],
data->inputNodes[whichCase], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
else
{
matVecMultC(testStruct->weights.values[layerCount - 1],
nodesForUpdating->nodes[whichCase][layerCount - 2], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
//The implication of these next lines would seem to be that you do not use the final layer in calculating partRelu.
//I have carried that into the above.
// matVecMultC(testStruct->weights.values[layerCount - 1],
// testStruct->layerNodes.values[layerCount - 1], testStruct->weights.numRows[layerCount - 1],
// testStruct->weights.numCols[layerCount - 1], partRelu);
//Sigma
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
if (partRelu[rowCount] < 0.) {
partRelu[rowCount] = 0.;
}
else {
partRelu[rowCount] = 1.;
}
// partRelu[rowCount] = 1.; //uncomment here and comment above to Kill sigma till you understand it.
for (size_t colCount = 0; colCount < tempGradStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
//(partial z wrt w)*partial relu*pCpA
if (layerCount == 1) {
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount-1]*rowCount + colCount;
tempGradStruct->weights.values[indexFlatten] +=
data->inputNodes[whichCase][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
else {
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount - 1] * rowCount + colCount;
tempGradStruct->weights.values[indexFlatten] +=
nodesForUpdating->nodes[whichCase][layerCount - 2][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
// testStruct->layerNodes.values[layerCount - 1][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// Each row also has a bias term at the end of the row.
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount - 1] * rowCount + testStruct->weights.numCols[layerCount - 1] - 1;
tempGradStruct->weights.values[indexFlatten] += partRelu[rowCount] * pCpA[rowCount];
}
if (layerCount > 1) {
//Calculate the pCpA host_vector for the next round.
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
double tempSum = 0.;
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
tempSum += testStruct->weights.values[layerCount - 1][rowCount][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
temppCpA[colCount] = tempSum;
}
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
pCpA[colCount] = temppCpA[colCount];
}
}
}
free(pCpA);
free(partRelu);
free(temppCpA);
}
|
55d2b2c9c3a2031f8fe362236260d8bbcd0994a3.cu
|
/**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda_runtime.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#include <helper_cuda.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include "CStructs.h"
extern "C" void launchCuda(int* a, int* b, int*c, int n);
extern "C" void launchCudaMatrix(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2);
//extern "C" void launchCudaMatrix(size_t numLoops, int** myMatrix1, int** myMatrix2);
extern "C" void launchCudaPitch(int width, int height, size_t pitch, int* d_tab);
extern "C" void launchCudaMatVecMult(double *d_constMatrix, double* d_varArray,double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz);
extern "C" void launchCudaPVecReduce(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz, double*outArray, bool shared = true);
// kernel: multiply the ny x nz matrix cM (row-major) by each of the nx input vectors in varA, writing the nx x ny results into gatherA
__global__ void pMatVecMult(double *cM, double *varA, double *gatherA, int nx, int ny, int nz)
{
unsigned layer = blockIdx.x*blockDim.x + threadIdx.x;
unsigned row = blockIdx.y*blockDim.y + threadIdx.y;
double tempSum = 0.;
if ((layer < nx) &&(row < ny)) {
for (size_t col = 0; col < nz; ++col) {
tempSum += cM[row*nz + col] * varA[layer*nz + col];
}
gatherA[layer*ny+ row] = tempSum;
}
}
__global__ void pVecReducedGlobal(double *gatherA, int nx, int ny, int n, double* out)
{
size_t t_id = ny * (threadIdx.x + blockDim.x*blockIdx.x );
for(size_t s = nx / 2; s > 0; s >>= 1) {
if (t_id < s*ny) {
for (size_t row = 0; row < ny; ++row) {
gatherA[t_id+row] += gatherA[t_id + s*ny+row];
}
}
__syncthreads();
}
if (t_id == 0) {
for (size_t row = 0; row < ny; ++row) {
out[row] = gatherA[t_id+row];
}
}
}
__global__ void pVecReduceShared(double *gatherA, int nx, int ny, int n, double* out)
{
extern double __shared__ sdata[];
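	// dynamic shared memory; launchCudaPVecReduce below passes nx*ny*sizeof(double) bytes so the whole
	// gather array can be staged in sdata (this path appears to assume a single block of nx/2 threads)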
size_t t_id = ny * (threadIdx.x + blockDim.x*blockIdx.x);
for (size_t row = 0; row < ny; ++row) {
sdata[t_id+row] = gatherA[t_id+row];
sdata[t_id + nx*ny / 2+row] = gatherA[t_id + nx*ny / 2+row];
}
__syncthreads();
for (size_t s = nx / 2; s > 0; s >>= 1) {
if (t_id < s*ny) {
for (size_t row = 0; row < ny; ++row) {
sdata[t_id + row] += sdata[t_id + s * ny + row];
}
}
__syncthreads();
}
if (t_id == 0) {
for (size_t row = 0; row < ny; ++row) {
out[row] = sdata[t_id + row];
}
}
}
void launchCudaMatVecMult(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz) {
pMatVecMult << <gridSize, blockSize >> > (d_constMatrix, d_varArray, d_gatherArray, nx, ny, nz);
cudaDeviceSynchronize();
}
void launchCudaPVecReduce(double *d_constMatrix, double* d_varArray, double* d_gatherArray,
dim3 blockSize, dim3 gridSize, int nx, int ny, int nz, double*outArray,bool shared) {
if (shared) {
pVecReduceShared << <gridSize, blockSize, nx*ny*sizeof(double) >> > (d_gatherArray, nx, ny, nz, outArray);
}
else
{
pVecReducedGlobal << <gridSize, blockSize >> > (d_gatherArray, nx, ny, nz, outArray);
}
cudaDeviceSynchronize();
}
__global__ void pitchEx2D_1(int* tab, int width, int height, size_t pitch) {
int row = threadIdx.x + blockIdx.x * blockDim.x;
int col = threadIdx.y + blockIdx.y * blockDim.y;
if (row < width && col < height) {
*(((int *)(((char *)tab) + (row * pitch))) + col) = 9;
}
}
void launchCudaPitch(int width, int height, size_t pitch, int* d_tab) {
dim3 grid(width, height);
dim3 block(width, height);
pitchEx2D_1 << <grid, block >> > (d_tab, width, height, pitch);
cudaDeviceSynchronize();
}
//Sample CUDA function
__global__ void vectorAdd(int* a, int* b, int*c, int n) {
int i = threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
void launchCuda(int* a, int* b, int*c, int n) {
vectorAdd <<< 1, n >>> (a, b, c, n);
cudaDeviceSynchronize();
}
__global__ void simpleMatrixAdd(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2,size_t numRows, size_t numCols) {
// size_t numRows = myMatrix1[0]->numRows; //Not sure if this was important, since you were not setting numRows or numCols
// size_t numCols = myMatrix1[0]->numCols;
int loops = threadIdx.x;
myMatrix1[1]->matrix[2] = 200.;
if (loops < numLoops) {
for (size_t row = 0; row < numRows; ++row) {
for (size_t col = 0; col <numCols; ++col) {
myMatrix1[loops]->matrix[row*numCols + col] = myMatrix1[loops]->matrix[row*numCols + col] +
myMatrix2[loops]->matrix[row*numCols + col];
}
}
}
myMatrix1[1]->matrix[3] = 200.;
}
void launchCudaMatrix(size_t numLoops, SimpleMatrix** myMatrix1, SimpleMatrix** myMatrix2) {
myMatrix1[1]->matrix[0] = 100.;
size_t numIters = 256;
size_t blocks = numLoops / numIters;
printf("\nCalling kernel with numLoops = %d",numLoops);
simpleMatrixAdd << < blocks , numIters >> > (numLoops, myMatrix1, myMatrix2, myMatrix1[0]->numRows, myMatrix1[0]->numCols);
cudaDeviceSynchronize();
myMatrix1[1]->matrix[1] = 100.;
}
void matVecMultC(double** weights, double* inVector, int numRows, int numCols, double* p1) {
for (int rowCount = 0; rowCount < numRows; ++rowCount) {
double tempSum = 0;
for (int colCount = 0; colCount < numCols; ++colCount) {
tempSum += weights[rowCount][colCount] * inVector[colCount];
}
p1[rowCount] = tempSum;
}
}
void plusEqualsStruct(CNNStructureC *holdAccumGradients1, CNNStructureC *holdAccumGradients2) {
for (size_t numLayer = 0; numLayer < holdAccumGradients1->weights.numLayers; ++numLayer) {
for (size_t numRow = 0; numRow < holdAccumGradients1->weights.numRows[numLayer]; ++numRow) {
for (size_t numCol = 0; numCol < holdAccumGradients1->weights.numCols[numLayer]; ++numCol) {
holdAccumGradients1->weights.values[numLayer][numRow][numCol] +=
holdAccumGradients2->weights.values[numLayer][numRow][numCol];
}
}
}
};
void setWeightsToZeros(CNNStructureC *input) {
//Assumes input already has memory created.
for (int layer = 0; layer < input->weights.numLayers - 1; ++layer) {
for (int row = 0; row < input->weights.numRows[layer]; ++row) {
for (int col = 0; col < input->weights.numCols[layer]; ++col) {
input->weights.values[layer][row][col] = 0;
}
}
}
};
void plusEqualsStruct(CNNStructureCFlat *holdAccumGradients1, CNNStructureCFlat *holdAccumGradients2) {
size_t indexFlatten;
for (size_t numLayer = 0; numLayer < holdAccumGradients1->weights.numLayers; ++numLayer) {
for (size_t numRow = 0; numRow < holdAccumGradients1->weights.numRows[numLayer]; ++numRow) {
for (size_t numCol = 0; numCol < holdAccumGradients1->weights.numCols[numLayer]; ++numCol) {
indexFlatten = holdAccumGradients1->weights.startLayer[numLayer] +
numRow * holdAccumGradients1->weights.numCols[numLayer] + numCol;
holdAccumGradients1->weights.values[indexFlatten] +=
holdAccumGradients2->weights.values[indexFlatten];
}
}
}
};
void setWeightsToZeros(CNNStructureCFlat *input) {
//Assumes input already has memory created.
for (int layer = 0; layer < input->weights.numLayers - 1; ++layer) {
for (int row = 0; row < input->weights.numRows[layer]; ++row) {
for (int col = 0; col < input->weights.numCols[layer]; ++col) {
input->weights.values[input->weights.startLayer[layer] + row * input->weights.numCols[layer] + col] = 0;
}
}
}
};
void structMatVecMult(CNNStructureC* inStruct, reducedLayerNodesC* nodesForUpdating, double* inVector, int numLayer, int whichCase) {
int numCols = inStruct->weights.numCols[numLayer];
int numRows = inStruct->weights.numRows[numLayer];
for (int rowCount = 0; rowCount < numRows; ++rowCount) {
double tempSum = 0;
for (int colCount = 0; colCount < numCols; ++colCount) {
tempSum += inStruct->weights.values[numLayer][rowCount][colCount] * inVector[colCount];
}
//Sigma function
if (tempSum < 0) {
tempSum = 0; //Comment this if you want to kill it.
}
nodesForUpdating->nodes[whichCase][numLayer][rowCount] = tempSum; //Assumes numLayer starts at 0.
// inStruct->layerNodes.values[numLayer + 1][rowCount] = tempSum;
}
};
void updateLayersC(CNNStructureC * testStruct, dataC* data, reducedLayerNodesC* nodesForUpdating, int whichCase) {
//input contains the starting layer nodes.
// testStruct->layerNodes.values[0] = data->inputNodes[whichCase];
double* tempLayer;
tempLayer = data->inputNodes[whichCase];
for (int layerCount = 0; layerCount < testStruct->weights.numLayers; ++layerCount) {
structMatVecMult(testStruct, nodesForUpdating, tempLayer, layerCount, whichCase);
tempLayer = nodesForUpdating->nodes[whichCase][layerCount];
// tempLayer = testStruct->layerNodes.values[layerCount + 1];
}
}
double calcCostC(CNNStructureC* inputStruct, dataC* inputData, reducedLayerNodesC* nodesForUpdating, int whichCase, bool updateLayersBool) {
//This version uses the input to update the layers and then calculate the cost.
if (updateLayersBool) {
updateLayersC(inputStruct, inputData, nodesForUpdating, whichCase);
}
	//The cost depends only on the last layer's nodes. No additional term is appended to the vector here.
double costSum = 0.;
size_t numLayers = nodesForUpdating->numLayers;
// size_t numLayers = inputStruct->layerNodes.numLayers;
size_t desiredSize = inputData->numOutputs;
for (int iCnt = 0; iCnt < desiredSize - 1; ++iCnt) { // Cut down by one because desired has the extra 1.
// It doesn't really matter since both have 1 at the end.
costSum += pow((nodesForUpdating->nodes[whichCase][numLayers-1][iCnt] - inputData->outputNodes[whichCase][iCnt]), 2);
// costSum += pow((inputStruct->layerNodes.values[numLayers - 1][iCnt] - inputData->outputNodes[whichCase][iCnt]), 2);
}
return(costSum);
}
void makeGradPassC(CNNStructureC* testStruct, CNNStructureC* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase);
void makeGradPassCUDA(CNNStructureC* testStruct, CNNStructureCFlat* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase);
extern "C" void calcGradientPartsC(CNNStructureC holdAccumGradients[], structureC<int> *testCase, dataC* data,
CNNStructureC *testStruct, size_t begin, double* cost, reducedLayerNodesC* nodesForUpdating,
const size_t numBlocks, const size_t numThreads) {
size_t end = numBlocks * numThreads;
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
setWeightsToZeros(&holdAccumGradients[iCnt]); // Zero this out because of the += later.
}
double tempCost = 0;
size_t tSet;
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
updateLayersC(testStruct, data, nodesForUpdating, tSet);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
tempCost += calcCostC(testStruct, data, nodesForUpdating, tSet, false);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
// Add to holdAccumGradients for this test set.
makeGradPassC(testStruct, &holdAccumGradients[iCnt], nodesForUpdating, data, tSet);
}
}
//For the moment, just gather them up, here.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
plusEqualsStruct(&holdAccumGradients[0], &holdAccumGradients[iCnt]);
}
*cost = tempCost / double(end - begin);
printf("\nCost in calcGradientPartsC ");
printf("\t%f", *cost);
}
extern "C" void calcGradientPartsCUDA(CNNStructureCFlat holdAccumGradients[], structureC<int> *testCase, dataC* data,
CNNStructureC *testStruct, size_t begin, double* cost, reducedLayerNodesC* nodesForUpdating,
const size_t numBlocks, const size_t numThreads) {
size_t end = numBlocks * numThreads;
// Set up (zero out) numThreads number of holdAccumGradients. Each will be added to as you work towards
// the average over all the training sets.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
setWeightsToZeros(&holdAccumGradients[iCnt]); // Zero this out because of the += later.
}
// launchCUDAGradientsZero(d_holdAccumGradientsCFlatWeights);
double tempCost = 0;
size_t tSet;
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
updateLayersC(testStruct, data, nodesForUpdating, tSet);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
tempCost += calcCostC(testStruct, data, nodesForUpdating, tSet, false);
}
}
for (size_t blockNum = 0; blockNum < numBlocks; ++blockNum) {
for (size_t iCnt = 0; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
tSet = blockNum * numThreads + iCnt;
// Add to holdAccumGradients for this test set.
makeGradPassCUDA(testStruct, &holdAccumGradients[iCnt], nodesForUpdating, data, tSet);
}
}
//For the moment, just gather them up, here.
for (size_t iCnt = 1; iCnt < numThreads; ++iCnt) { //Note, currently hardwiring in begin at 0
plusEqualsStruct(&holdAccumGradients[0], &holdAccumGradients[iCnt]);
}
*cost = tempCost / double(end - begin);
printf("\nCost in calcGradientPartsCUDA ");
printf("\t%f", *cost);
}
void makeGradPassC(CNNStructureC* testStruct, CNNStructureC* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase){
// The goal here is to create the gradient for the single test case.
// There are multiple terms that need to be multiplied
// together to form each element. Complete a layer (from back to front)
	// before proceeding to the next layer. The reason is that you need the results of layer L
	// in order to get the cost terms for layer L-1.
	/*Memory consideration: I have three vectors (pCpA, partRelu, and temppCpA) that will take on varying sizes
	as I go through the layers. Rather than resizing the vectors, I'm just going to allocate the memory for the largest
	size that they will take on. Note that I could define these once outside of here. However, at this point in my understanding,
	I think that would introduce a problem when going highly parallel, i.e., each thread should have its own copy.
	A likely more efficient way would be to make an array of each of these, then send one of each into this function. Ideally,
	you could leave them in place (not have to move them on, off, or create them on the device each time you run this).*/
/*In going parallel, I now get the first layer nodes from the data input nodes. Then, the rest of the nodes (hidden layers and
output nodes) come from the reducedForUpdate nodes. */
//Find the largest size vector (Note, that is a number that could be precalculated and sent in).
size_t maxNodes = 0;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numLayers; ++iCnt) {
if (testStruct->layerNodes.numNodes[iCnt] > maxNodes) {
maxNodes = testStruct->layerNodes.numNodes[iCnt];
}
}
double* pCpA = (double*)malloc(maxNodes* sizeof(double));
double* partRelu = (double*)malloc(sizeof(double)*maxNodes);
double* temppCpA = (double*)malloc(sizeof(double)*maxNodes);
size_t startLayer = testStruct->layerNodes.numLayers - 1;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numNodes[startLayer]; ++iCnt) {
pCpA[iCnt] = 2.*(nodesForUpdating->nodes[whichCase][startLayer-1][iCnt] - data->outputNodes[whichCase][iCnt]);
// pCpA[iCnt] = 2.*(testStruct->layerNodes.values[startLayer][iCnt] - desired[iCnt]);
}
for (size_t layerCount = startLayer; layerCount > 0; --layerCount) {
if (layerCount == 1) {
matVecMultC(testStruct->weights.values[layerCount - 1],
data->inputNodes[whichCase], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
else
{
matVecMultC(testStruct->weights.values[layerCount - 1],
nodesForUpdating->nodes[whichCase][layerCount - 2], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
//The implication of these next lines would seem to be that you do not use the final layer in calculating partRelu.
//I have carried that into the above.
// matVecMultC(testStruct->weights.values[layerCount - 1],
// testStruct->layerNodes.values[layerCount - 1], testStruct->weights.numRows[layerCount - 1],
// testStruct->weights.numCols[layerCount - 1], partRelu);
//Sigma
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
if (partRelu[rowCount] < 0.) {
partRelu[rowCount] = 0.;
}
else {
partRelu[rowCount] = 1.;
}
// partRelu[rowCount] = 1.; //uncomment here and comment above to Kill sigma till you understand it.
for (size_t colCount = 0; colCount < tempGradStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
//(partial z wrt w)*partial relu*pCpA
if (layerCount == 1) {
tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
data->inputNodes[whichCase][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
else {
tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
nodesForUpdating->nodes[whichCase][layerCount - 2][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
// testStruct->layerNodes.values[layerCount - 1][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// Each row also has a bias term at the end of the row.
tempGradStruct->weights.values[layerCount - 1][rowCount][testStruct->weights.numCols[layerCount - 1] - 1] +=
partRelu[rowCount] * pCpA[rowCount];
}
if (layerCount > 1) {
//Calculate the pCpA host_vector for the next round.
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
double tempSum = 0.;
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
tempSum += testStruct->weights.values[layerCount - 1][rowCount][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
temppCpA[colCount] = tempSum;
}
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
pCpA[colCount] = temppCpA[colCount];
}
}
}
free(pCpA);
free(partRelu);
free(temppCpA);
}
void makeGradPassCUDA(CNNStructureC* testStruct, CNNStructureCFlat* tempGradStruct, reducedLayerNodesC* nodesForUpdating,
dataC* data, size_t whichCase) {
// The goal here is to create the gradient for the single test case.
// There are multiple terms that need to be multiplied
// together to form each element. Complete a layer (from back to front)
	// before proceeding to the next layer. The reason is that you need the results of layer L
	// in order to get the cost terms for layer L-1.
	/*Memory consideration: I have three vectors (pCpA, partRelu, and temppCpA) that will take on varying sizes
	as I go through the layers. Rather than resizing the vectors, I'm just going to allocate the memory for the largest
	size that they will take on. Note that I could define these once outside of here. However, at this point in my understanding,
	I think that would introduce a problem when going highly parallel, i.e., each thread should have its own copy.
	A likely more efficient way would be to make an array of each of these, then send one of each into this function. Ideally,
	you could leave them in place (not have to move them on, off, or create them on the device each time you run this).*/
/*In going parallel, I now get the first layer nodes from the data input nodes. Then, the rest of the nodes (hidden layers and
output nodes) come from the reducedForUpdate nodes. */
//Find the largest size vector (Note, that is a number that could be precalculated and sent in).
size_t indexFlatten;
size_t maxNodes = 0;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numLayers; ++iCnt) {
if (testStruct->layerNodes.numNodes[iCnt] > maxNodes) {
maxNodes = testStruct->layerNodes.numNodes[iCnt];
}
}
double* pCpA = (double*)malloc(maxNodes * sizeof(double));
double* partRelu = (double*)malloc(sizeof(double)*maxNodes);
double* temppCpA = (double*)malloc(sizeof(double)*maxNodes);
size_t startLayer = testStruct->layerNodes.numLayers - 1;
for (size_t iCnt = 0; iCnt < testStruct->layerNodes.numNodes[startLayer]; ++iCnt) {
pCpA[iCnt] = 2.*(nodesForUpdating->nodes[whichCase][startLayer - 1][iCnt] - data->outputNodes[whichCase][iCnt]);
// pCpA[iCnt] = 2.*(testStruct->layerNodes.values[startLayer][iCnt] - desired[iCnt]);
}
for (size_t layerCount = startLayer; layerCount > 0; --layerCount) {
if (layerCount == 1) {
matVecMultC(testStruct->weights.values[layerCount - 1],
data->inputNodes[whichCase], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
else
{
matVecMultC(testStruct->weights.values[layerCount - 1],
nodesForUpdating->nodes[whichCase][layerCount - 2], testStruct->weights.numRows[layerCount - 1],
testStruct->weights.numCols[layerCount - 1], partRelu);
}
//The implication of these next lines would seem to be that you do not use the final layer in calculating partRelu.
//I have carried that into the above.
// matVecMultC(testStruct->weights.values[layerCount - 1],
// testStruct->layerNodes.values[layerCount - 1], testStruct->weights.numRows[layerCount - 1],
// testStruct->weights.numCols[layerCount - 1], partRelu);
//Sigma
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
if (partRelu[rowCount] < 0.) {
partRelu[rowCount] = 0.;
}
else {
partRelu[rowCount] = 1.;
}
// partRelu[rowCount] = 1.; //uncomment here and comment above to Kill sigma till you understand it.
for (size_t colCount = 0; colCount < tempGradStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
//(partial z wrt w)*partial relu*pCpA
if (layerCount == 1) {
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount-1]*rowCount + colCount;
tempGradStruct->weights.values[indexFlatten] +=
data->inputNodes[whichCase][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
else {
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount - 1] * rowCount + colCount;
tempGradStruct->weights.values[indexFlatten] +=
nodesForUpdating->nodes[whichCase][layerCount - 2][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// tempGradStruct->weights.values[layerCount - 1][rowCount][colCount] +=
// testStruct->layerNodes.values[layerCount - 1][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
// Each row also has a bias term at the end of the row.
indexFlatten = tempGradStruct->weights.startLayer[layerCount - 1] +
tempGradStruct->weights.numCols[layerCount - 1] * rowCount + testStruct->weights.numCols[layerCount - 1] - 1;
tempGradStruct->weights.values[indexFlatten] += partRelu[rowCount] * pCpA[rowCount];
}
if (layerCount > 1) {
//Calculate the pCpA host_vector for the next round.
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
double tempSum = 0.;
for (size_t rowCount = 0; rowCount < testStruct->weights.numRows[layerCount - 1] - 1; ++rowCount) {
tempSum += testStruct->weights.values[layerCount - 1][rowCount][colCount] * partRelu[rowCount] * pCpA[rowCount];
}
temppCpA[colCount] = tempSum;
}
for (size_t colCount = 0; colCount < testStruct->weights.numCols[layerCount - 1] - 1; ++colCount) {
pCpA[colCount] = temppCpA[colCount];
}
}
}
free(pCpA);
free(partRelu);
free(temppCpA);
}
|
d4e579fd1c25c97ab93c25309fe7cc2adb2acbfc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <hip/hip_fp16.h>
#include <hip/hip_runtime.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include "SOIL.h"
texture<uchar4, 2, hipReadModeElementType> texImage;
void checkGPUOperation()
{
hipError_t code = hipGetLastError();
if (code != hipSuccess){
fprintf(stderr, "Cuda Error : %s\n", hipGetErrorString(code));
exit(-1);
}
}
__global__ void median_filter(uchar4 *pDst,
int width,
int height,
int radius)
{
// compute idx
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
// arrays for region pixels
	unsigned char xArr[512], yArr[512], zArr[512], wArr[512]; //TODO : unhardcode the region buffer size
int arraySize = 0;
// prevent texture out of range
int fromx = thrust::max(0, tidx - radius);
int tox = thrust::min(width, tidx + radius);
int fromy = thrust::max(0, tidy - radius);
int toy = thrust::min(height, tidy + radius);
// collect region pixels
for (int i = fromx; i < tox; i++){
for (int j = fromy; j < toy; j++){
uchar4 tmp = tex2D(texImage, i + 0.5f, j + 0.5f);
xArr[arraySize] = tmp.x;
yArr[arraySize] = tmp.y;
zArr[arraySize] = tmp.z;
wArr[arraySize] = tmp.w;
arraySize++;
}
}
//sort pixels
thrust::sort(thrust::seq, xArr, xArr + arraySize);
thrust::sort(thrust::seq, yArr, yArr + arraySize);
thrust::sort(thrust::seq, zArr, zArr + arraySize);
thrust::sort(thrust::seq, wArr, wArr + arraySize);
// take the median. Middle pixels in sort array
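	// note: each channel is sorted and picked independently (a marginal median), so the output pixel
	// may mix channel values taken from different source pixels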
uchar4 res;
res.x = xArr[arraySize / 2];
	res.y = yArr[arraySize / 2];
res.z = zArr[arraySize / 2];
res.w = wArr[arraySize / 2];
	// the launch grid is rounded up, so skip threads that fall outside the image
	if (tidx < width && tidy < height)
		pDst[tidx + tidy * width] = res;
}
int main(int argc, char ** argv)
{
//parsing arguments
const char *srcImagePath = argc >= 2 ? argv[1] : "Lenna.png";
const char *dstImagePath = argc >= 3 ? argv[2] : "result.tga";
int radius = argc >= 4 ? atoi(argv[3]) : 1;
// loading image
int width, height, channels;
unsigned char* srcImage =
SOIL_load_image( srcImagePath,
&width, &height, &channels,
SOIL_LOAD_RGBA /*unhardcode*/ );
if (srcImage == NULL) {
fprintf(stderr, "failed loading image");
return -1;
}
int size = width * height * 4;
printf("image loaded. width : %d, height %d\n", width, height);
//create cuda array
hipChannelFormatDesc channelDesc =
hipCreateChannelDesc(8, 8, 8, 8, hipChannelFormatKindUnsigned);
checkGPUOperation();
hipArray *cuArray;
hipMallocArray(&cuArray, &channelDesc, width, height);
checkGPUOperation();
// copy image to texture
hipMemcpyToArray(cuArray, 0, 0, srcImage, size, hipMemcpyHostToDevice);
checkGPUOperation();
	// free unused memory
SOIL_free_image_data(srcImage);
// bind texture
hipBindTextureToArray(texImage, cuArray);
checkGPUOperation();
//allocate memory to result image
uchar4 *devResult;
hipMalloc((void **)&devResult, size);
// run kernel
dim3 block(32, 8);
dim3 grid( width / block.x + ((width % block.x) ? 1: 0),
height / block.y + ((height % block.y) ? 1: 0) );
hipLaunchKernelGGL(( median_filter), dim3(grid), dim3(block), 0, 0, devResult, width, height, radius);
hipDeviceSynchronize();
// copy result
unsigned char* dstImage = (unsigned char *)malloc(size);
hipMemcpy(dstImage, devResult, size, hipMemcpyDeviceToHost);
SOIL_save_image( dstImagePath,
SOIL_SAVE_TYPE_TGA,
width, height, SOIL_LOAD_RGBA,
dstImage);
//free memory
hipFreeArray(cuArray);
hipFree(devResult);
free(dstImage);
printf("Done\n");
return 0;
}
|
d4e579fd1c25c97ab93c25309fe7cc2adb2acbfc.cu
|
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cuda_profiler_api.h>
#include <cuda_fp16.h>
#include <cuda.h>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include "SOIL.h"
texture<uchar4, 2, cudaReadModeElementType> texImage;
void checkGPUOperation()
{
cudaError_t code = cudaGetLastError();
if (code != cudaSuccess){
fprintf(stderr, "Cuda Error : %s\n", cudaGetErrorString(code));
exit(-1);
}
}
__global__ void median_filter(uchar4 *pDst,
int width,
int height,
int radius)
{
// compute idx
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
// arrays for region pixels
	unsigned char xArr[512], yArr[512], zArr[512], wArr[512]; //TODO : unhardcode the region buffer size
int arraySize = 0;
// prevent texture out of range
int fromx = thrust::max(0, tidx - radius);
int tox = thrust::min(width, tidx + radius);
int fromy = thrust::max(0, tidy - radius);
int toy = thrust::min(height, tidy + radius);
// collect region pixels
for (int i = fromx; i < tox; i++){
for (int j = fromy; j < toy; j++){
uchar4 tmp = tex2D(texImage, i + 0.5f, j + 0.5f);
xArr[arraySize] = tmp.x;
yArr[arraySize] = tmp.y;
zArr[arraySize] = tmp.z;
wArr[arraySize] = tmp.w;
arraySize++;
}
}
//sort pixels
thrust::sort(thrust::seq, xArr, xArr + arraySize);
thrust::sort(thrust::seq, yArr, yArr + arraySize);
thrust::sort(thrust::seq, zArr, zArr + arraySize);
thrust::sort(thrust::seq, wArr, wArr + arraySize);
// take the median. Middle pixels in sort array
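	// note: each channel is sorted and picked independently (a marginal median), so the output pixel
	// may mix channel values taken from different source pixels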
uchar4 res;
res.x = xArr[arraySize / 2];
	res.y = yArr[arraySize / 2];
res.z = zArr[arraySize / 2];
res.w = wArr[arraySize / 2];
	// the launch grid is rounded up, so skip threads that fall outside the image
	if (tidx < width && tidy < height)
		pDst[tidx + tidy * width] = res;
}
int main(int argc, char ** argv)
{
//parsing arguments
const char *srcImagePath = argc >= 2 ? argv[1] : "Lenna.png";
const char *dstImagePath = argc >= 3 ? argv[2] : "result.tga";
int radius = argc >= 4 ? atoi(argv[3]) : 1;
// loading image
int width, height, channels;
unsigned char* srcImage =
SOIL_load_image( srcImagePath,
&width, &height, &channels,
SOIL_LOAD_RGBA /*unhardcode*/ );
if (srcImage == NULL) {
fprintf(stderr, "failed loading image");
return -1;
}
int size = width * height * 4;
printf("image loaded. width : %d, height %d\n", width, height);
//create cuda array
cudaChannelFormatDesc channelDesc =
cudaCreateChannelDesc(8, 8, 8, 8, cudaChannelFormatKindUnsigned);
checkGPUOperation();
cudaArray *cuArray;
cudaMallocArray(&cuArray, &channelDesc, width, height);
checkGPUOperation();
// copy image to texture
cudaMemcpyToArray(cuArray, 0, 0, srcImage, size, cudaMemcpyHostToDevice);
checkGPUOperation();
	// free unused memory
SOIL_free_image_data(srcImage);
// bind texture
cudaBindTextureToArray(texImage, cuArray);
checkGPUOperation();
//allocate memory to result image
uchar4 *devResult;
cudaMalloc((void **)&devResult, size);
// run kernel
dim3 block(32, 8);
dim3 grid( width / block.x + ((width % block.x) ? 1: 0),
height / block.y + ((height % block.y) ? 1: 0) );
median_filter<<<grid, block>>>(devResult, width, height, radius);
cudaDeviceSynchronize();
// copy result
unsigned char* dstImage = (unsigned char *)malloc(size);
cudaMemcpy(dstImage, devResult, size, cudaMemcpyDeviceToHost);
SOIL_save_image( dstImagePath,
SOIL_SAVE_TYPE_TGA,
width, height, SOIL_LOAD_RGBA,
dstImage);
//free memory
cudaFreeArray(cuArray);
cudaFree(devResult);
free(dstImage);
printf("Done\n");
return 0;
}
|
e7b265ab67a601c14b9bbb9ae8135f11772c365f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "type.h"
__device__ void reduceCorePow2(const unsigned TID, unsigned N, double *reduce){
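	//! @todo unimplemented stub: callers such as reduceMadDotGG currently get no final reduction from this call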
}
__device__ void reduceMadDotGG(const unsigned TID, unsigned N, double *reduce,
const double *x, const double *y){
// serial version for clarity
/*
if(!TID){
reduce[0] = x[0] * y[0];
for(int i = 1; i < N; i++)
reduce[0] += x[i]*y[i];
}
*/
unsigned threads, nextSmaller;
// parallel version
if(__popc(N) != 1){ // non power of two
// do one reduction to a power of two
nextSmaller = 1 << (31-__clz(N));
threads = nextSmaller/2;
//! @todo threads could be > nextSmaller/2,
if(TID < threads){
reduce[TID] = x[TID] * y[TID] + x[TID + threads] * y[TID + threads];
if(TID < N - nextSmaller)
reduce[TID] += x[TID + nextSmaller] * y[TID + nextSmaller];
}
__syncthreads();
} else {
threads = N/2;
}
reduceCorePow2(TID, threads, reduce);
}
__device__ void reduceCore(const unsigned TID, unsigned N, double *reduce){
/*
if(!TID){
for(int i = 1; i < N; i++)
reduce[0] += reduce[i];
}
*/
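	// parallel version: halve N each pass; for odd N the middle element (index threads-1) is left in place,
	// hence the "- (N % 2)" guard. Example: N=7 -> threads=4, TIDs 0..2 add in elements 4..6; then N goes 4 -> 2 -> 1.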
unsigned threads;
while(N/2){
threads = (N + 1) / 2;
if(TID < threads - (N % 2))
reduce[TID] += reduce[TID + threads];
__syncthreads();
N = threads;
}
}
/*
Written for register-based storage. Result is placed in *reduce.
*/
/*! @todo dot products could be faster; specifically, each thread is only
multiplying or adding, not both simultaneously.
*/
__device__ void dotRR(const unsigned TID,
const unsigned N,
const double x,
const double y){
double *reduce = shared;
// compute dot product terms
// place x*y in shared memory (reduce)
__syncthreads();
reduce[TID] = x * y;
__syncthreads();
reduceCore(TID, N, reduce);
}
__device__ void dotRG(const unsigned TID,
const unsigned N,
const double x,
const double *y){ // assume blockDim.x elements
double *reduce = shared;
__syncthreads();
reduce[TID] = x * y[TID];
__syncthreads();
reduceCore(TID, N, reduce);
}
__device__ void dotGG(const unsigned TID,
const unsigned N,
const double *x,
const double *y,
double *reduce){ // assume blockDim.x elements
__syncthreads();
reduce[TID] = x[TID] * y[TID];
__syncthreads();
reduceCore(TID, N, reduce);
}
/*
A is in constant memory. A must be column-major and square (NxN).
*/
__device__ double vecRMatCSq(const unsigned TID,
const unsigned BID,
const double x,
const unsigned N,
const double *A,
const unsigned lda){
double retVal = 0.0;
__syncthreads();
shared[TID] = x;
__syncthreads();
/*
for(int i = 0; i < N; i++){
if(i == TID)
continue;
retVal += shared[i] * A[lda * TID + i];
}
retVal += x * A[lda * TID + TID];
*/
for(int i = 0; i < N; i++)
retVal += shared[i] * A[lda * i + TID];
return retVal;
}
|
e7b265ab67a601c14b9bbb9ae8135f11772c365f.cu
|
#include "type.h"
__device__ void reduceCorePow2(const unsigned TID, unsigned N, double *reduce){
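	//! @todo unimplemented stub: callers such as reduceMadDotGG currently get no final reduction from this call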
}
__device__ void reduceMadDotGG(const unsigned TID, unsigned N, double *reduce,
const double *x, const double *y){
// serial version for clarity
/*
if(!TID){
reduce[0] = x[0] * y[0];
for(int i = 1; i < N; i++)
reduce[0] += x[i]*y[i];
}
*/
unsigned threads, nextSmaller;
// parallel version
if(__popc(N) != 1){ // non power of two
// do one reduction to a power of two
nextSmaller = 1 << (31-__clz(N));
threads = nextSmaller/2;
//! @todo threads could be > nextSmaller/2,
if(TID < threads){
reduce[TID] = x[TID] * y[TID] + x[TID + threads] * y[TID + threads];
if(TID < N - nextSmaller)
reduce[TID] += x[TID + nextSmaller] * y[TID + nextSmaller];
}
__syncthreads();
} else {
threads = N/2;
}
reduceCorePow2(TID, threads, reduce);
}
__device__ void reduceCore(const unsigned TID, unsigned N, double *reduce){
/*
if(!TID){
for(int i = 1; i < N; i++)
reduce[0] += reduce[i];
}
*/
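	// parallel version: halve N each pass; for odd N the middle element (index threads-1) is left in place,
	// hence the "- (N % 2)" guard. Example: N=7 -> threads=4, TIDs 0..2 add in elements 4..6; then N goes 4 -> 2 -> 1.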
unsigned threads;
while(N/2){
threads = (N + 1) / 2;
if(TID < threads - (N % 2))
reduce[TID] += reduce[TID + threads];
__syncthreads();
N = threads;
}
}
/*
Written for register-based storage. Result is placed in *reduce.
*/
/*! @todo dot products could be faster; specifically, each thread is only
multiplying or adding, not both simultaneously.
*/
__device__ void dotRR(const unsigned TID,
const unsigned N,
const double x,
const double y){
double *reduce = shared;
// compute dot product terms
// place x*y in shared memory (reduce)
__syncthreads();
reduce[TID] = x * y;
__syncthreads();
reduceCore(TID, N, reduce);
}
__device__ void dotRG(const unsigned TID,
const unsigned N,
const double x,
const double *y){ // assume blockDim.x elements
double *reduce = shared;
__syncthreads();
reduce[TID] = x * y[TID];
__syncthreads();
reduceCore(TID, N, reduce);
}
__device__ void dotGG(const unsigned TID,
const unsigned N,
const double *x,
const double *y,
double *reduce){ // assume blockDim.x elements
__syncthreads();
reduce[TID] = x[TID] * y[TID];
__syncthreads();
reduceCore(TID, N, reduce);
}
/*
A is in constant memory. A must be column-major and square (NxN).
*/
__device__ double vecRMatCSq(const unsigned TID,
const unsigned BID,
const double x,
const unsigned N,
const double *A,
const unsigned lda){
double retVal = 0.0;
__syncthreads();
shared[TID] = x;
__syncthreads();
/*
for(int i = 0; i < N; i++){
if(i == TID)
continue;
retVal += shared[i] * A[lda * TID + i];
}
retVal += x * A[lda * TID + TID];
*/
for(int i = 0; i < N; i++)
retVal += shared[i] * A[lda * i + TID];
return retVal;
}
|
33436e7f58c210d0bc931d60c65d879bc8dbce2f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernels_hip.cuh"
__device__ void prepareIndexexPitches(int *row, int *column, size_t *pitchOld, size_t *pitchNew)
{
*row = (blockIdx.x * blockDim.x) + threadIdx.x;
*column = (blockIdx.y * blockDim.y) + threadIdx.y;
*pitchOld = *pitchOld / sizeof(int);
*pitchNew = *pitchNew / sizeof(int);
}
__global__ void checkAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + row;
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + row;
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRight(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = column * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeft(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = column * pitchOld + (((row - 1) + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRightUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeftUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + ((row - 1 + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRightAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeftAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + ((row - 1 + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
|
33436e7f58c210d0bc931d60c65d879bc8dbce2f.cu
|
#include "kernels.cuh"
__device__ void prepareIndexexPitches(int *row, int *column, size_t *pitchOld, size_t *pitchNew)
{
*row = (blockIdx.x * blockDim.x) + threadIdx.x;
*column = (blockIdx.y * blockDim.y) + threadIdx.y;
*pitchOld = *pitchOld / sizeof(int);
*pitchNew = *pitchNew / sizeof(int);
}
__global__ void checkAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + row;
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + row;
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRight(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = column * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeft(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = column * pitchOld + (((row - 1) + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRightUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeftUnder(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column + 1) % columns) * pitchOld + ((row - 1 + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkRightAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + ((row + 1) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
__global__ void checkLeftAbove(int *board, int *newBoard, int rows, int columns, size_t pitchOld, size_t pitchNew)
{
int row, column;
prepareIndexexPitches(&row, &column, &pitchOld, &pitchNew);
if (row < rows && column < columns)
{
int idx = ((column - 1 + columns) % columns) * pitchOld + ((row - 1 + rows) % rows);
newBoard[column * pitchNew + row] += board[idx];
}
}
|
787ce8ae65603e9c43dc5f05abac389e07847b60.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Accelerated Computing for Deep Learning
*/
/*First part is to read the MNIST dataset*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include "timer.h"//All the head files below are from nbody-gpu.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "check.h"
#define DimBlock 256 //Define the Dimblock
#define n 28 // Define the thread_per_block
using namespace std;
int ReverseInt(int i) //Reverse the byte order: MNIST header integers are stored big-endian
{
unsigned char ch1, ch2, ch3, ch4;
ch1 = i & 255;
ch2 = (i >> 8) & 255;
ch3 = (i >> 16) & 255;
ch4 = (i >> 24) & 255;
return((int)ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4;
}
void read_Mnist_Images(vector<vector <double> > &images) //Read dataset
{
ifstream file("t10k-images.idx3-ubyte", ios::binary); //Read the MNIST in binary
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
int n_rows = 0;
int n_cols = 0;
file.read((char*)&magic_number, sizeof(magic_number));
file.read((char*)&number_of_images, sizeof(number_of_images));
file.read((char*)&n_rows, sizeof(n_rows));
file.read((char*)&n_cols, sizeof(n_cols));
magic_number = ReverseInt(magic_number);
number_of_images = ReverseInt(number_of_images);
n_rows = ReverseInt(n_rows);
n_cols = ReverseInt(n_cols);
cout << "magic number = " << magic_number << endl; // show magic number
cout << "number of images = " << number_of_images << endl; // show inmage numbers
cout << "rows = " << n_rows << endl; //show number of rows
cout << "cols = " << n_cols << endl; //show number of cols
for (int i = 0; i < 10000; i++) //number_of_images, read 10000 images
{
vector<double>tp;
for (int r = 0; r < n_rows; r++)
{
for (int c = 0; c < n_cols; c++)
{
unsigned char image = 1;
file.read((char*)&image, sizeof(image));
tp.push_back(image);
}
}
images.push_back(tp);
}
}
}
/*GPU kernel code, initiated by CPU, cannot be called by other kernel*/
__global__
void blurkernel (unsigned char *in, unsigned char *out, int w, int h)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (col < h && row < w )
{
int pixVal = 0;
int pixels = 0;
int BLUR_SIZE = 1; // Define the blur size as 1
for (int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE+1; ++blurcol){
for (int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE+1; ++blurrow)
{
int currow = row + blurrow;
int curcol = col + blurcol;
if (currow > -1 && currow < w && curcol > -1 && curcol < h)
{
pixVal += in[curcol * w + currow];
pixels++; //Keep track of number of pixels in the accumulated total
}
}
}
out[col * w + row] = (unsigned char)(pixVal / pixels); //Write our new pixel value out
}
}
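/* Added sketch (not in the original file): a minimal host-side helper showing the usual way
   to size a 2D launch for blurkernel -- one thread per pixel, a fixed 2D block, and a grid
   rounded up to cover a w x h image. The helper name launchBlur is an illustrative
   assumption, not part of this program's API. */
void launchBlur(unsigned char *d_in, unsigned char *d_out, int w, int h)
{
dim3 block(16, 16, 1); // 256 threads per block
dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1); // ceil(w/16) x ceil(h/16)
hipLaunchKernelGGL(( blurkernel), dim3(grid), dim3(block), 0, 0, d_in, d_out, w, h);
}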
/*use main function*/
int main()
{
vector< vector<double> > images;
read_Mnist_Images(images); //Read images
ofstream outFile("train5.idx3-ubyte", ios::binary); //use ofstream to create a new file named train5.idx3-ubyte
for (int i = 0; i < images.size(); i++)
{
for (int j = 0; j < images[0].size(); j++) //for (auto iter = labels.begin(); iter != labels.end(); iter++)
{
}
}
/*Image blur as a 2D kernel*/
/*unsigned char in, unsigned char out, int w, int h*/
unsigned char *in; //Define the size of char in and out
unsigned char *out;
int w = images.size(); //Define the size of height and width
int h = images[0].size();
int size = 250000 *n * n * sizeof (unsigned char);// Changed the float to char, increase the memory
hipMallocManaged (&out, size);
hipMallocManaged (&in, size);
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
in[col*w+row] = images[col][row];
out[col*w+row] = images[col][row];
}
}
/*code below is from 01-nbody-gpu.cu*/
const int nIters = 1;//Blur one time
double totalTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
StartTimer();
/*Blur the image*/
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
//in [col*w+ row] = out [col*w+ row] ;
}
}
dim3 Dimblock (28, 28, 1); // 2D block, n*n*1 threads
dim3 Dimgrid (((n - 1) / Dimblock.x) + 1, ((n - 1) / Dimblock.y) + 1, 1); // round up to cover the image; only 2 of the 3 dimensions are used
hipLaunchKernelGGL(( blurkernel) , dim3(Dimgrid), dim3(Dimblock), 0, 0, in, out, 28, 28); //Launch the kernel over the grid with all parameters
hipDeviceSynchronize();// synchronize: wait for the GPU kernel to finish
const double tElapsed = GetTimer() / 1000.0;
totalTime += tElapsed;
}
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
outFile.write((char*)&out[col*w+ row], sizeof(out[col*w+ row]));
}
}
cout <<totalTime << " ";
return 0;
}
|
787ce8ae65603e9c43dc5f05abac389e07847b60.cu
|
/*
Accelerated Computing for Deep Learning
*/
/*First part is to read the MNIST dataset*/
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include "timer.h"//All the head files below are from nbody-gpu.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "timer.h"
#include "check.h"
#define DimBlock 256 //Define the Dimblock
#define n 28 // Image width/height (28 x 28); also used as the per-axis block size
using namespace std;
int ReverseInt(int i) // Reverse the byte order of a 32-bit int (the IDX/MNIST headers are big-endian)
{
unsigned char ch1, ch2, ch3, ch4;
ch1 = i & 255;
ch2 = (i >> 8) & 255;
ch3 = (i >> 16) & 255;
ch4 = (i >> 24) & 255;
return((int)ch1 << 24) + ((int)ch2 << 16) + ((int)ch3 << 8) + ch4;
}
void read_Mnist_Images(vector<vector <double> > &images) //Read dataset
{
ifstream file("t10k-images.idx3-ubyte", ios::binary); //Read the MNIST in binary
if (file.is_open())
{
int magic_number = 0;
int number_of_images = 0;
int n_rows = 0;
int n_cols = 0;
file.read((char*)&magic_number, sizeof(magic_number));
file.read((char*)&number_of_images, sizeof(number_of_images));
file.read((char*)&n_rows, sizeof(n_rows));
file.read((char*)&n_cols, sizeof(n_cols));
magic_number = ReverseInt(magic_number);
number_of_images = ReverseInt(number_of_images);
n_rows = ReverseInt(n_rows);
n_cols = ReverseInt(n_cols);
cout << "magic number = " << magic_number << endl; // show magic number
cout << "number of images = " << number_of_images << endl; // show inmage numbers
cout << "rows = " << n_rows << endl; //show number of rows
cout << "cols = " << n_cols << endl; //show number of cols
for (int i = 0; i < 10000; i++) //number_of_images, read 10000 images
{
vector<double>tp;
for (int r = 0; r < n_rows; r++)
{
for (int c = 0; c < n_cols; c++)
{
unsigned char image = 1;
file.read((char*)&image, sizeof(image));
tp.push_back(image);
}
}
images.push_back(tp);
}
}
}
/*GPU kernel code, initiated by CPU, cannot be called by other kernel*/
__global__
void blurkernel (unsigned char *in, unsigned char *out, int w, int h)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (col < h && row < w )
{
int pixVal = 0;
int pixels = 0;
int BLUR_SIZE = 1; // Define the blur size as 1
for (int blurcol = -BLUR_SIZE; blurcol < BLUR_SIZE+1; ++blurcol){
for (int blurrow = -BLUR_SIZE; blurrow < BLUR_SIZE+1; ++blurrow)
{
int currow = row + blurrow;
int curcol = col + blurcol;
if (currow > -1 && currow < w && curcol > -1 && curcol < h)
{
pixVal += in[curcol * w + currow];
pixels++; //Keep track of number of pixels in the accumulated total
}
}
}
out[col * w + row] = (unsigned char)(pixVal / pixels); //Write our new pixel value out
}
}
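/* Added sketch (not in the original file): a minimal host-side helper showing the usual way
   to size a 2D launch for blurkernel -- one thread per pixel, a fixed 2D block, and a grid
   rounded up to cover a w x h image. The helper name launchBlur is an illustrative
   assumption, not part of this program's API. */
void launchBlur(unsigned char *d_in, unsigned char *d_out, int w, int h)
{
dim3 block(16, 16, 1); // 256 threads per block
dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1); // ceil(w/16) x ceil(h/16)
blurkernel<<<grid, block>>>(d_in, d_out, w, h);
}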
/*use main function*/
int main()
{
vector< vector<double> > images;
read_Mnist_Images(images); //Read images
ofstream outFile("train5.idx3-ubyte", ios::binary); //use ofstream to create a new file named train5.idx3-ubyte
for (int i = 0; i < images.size(); i++)
{
for (int j = 0; j < images[0].size(); j++) //for (auto iter = labels.begin(); iter != labels.end(); iter++)
{
}
}
/*Image blur as a 2D kernel*/
/*unsigned char in, unsigned char out, int w, int h*/
unsigned char *in; //Define the size of char in and out
unsigned char *out;
int w = images.size(); //Define the size of height and width
int h = images[0].size();
int size = 250000 *n * n * sizeof (unsigned char);// Changed the float to char, increase the memory
cudaMallocManaged (&out, size);
cudaMallocManaged (&in, size);
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
in[col*w+row] = images[col][row];
out[col*w+row] = images[col][row];
}
}
/*code below is from 01-nbody-gpu.cu*/
const int nIters = 1;//Blur one time
double totalTime = 0.0;
for (int iter = 1; iter <= nIters; iter++) {
StartTimer();
/*Blur the image*/
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
//in [col*w+ row] = out [col*w+ row] ;
}
}
dim3 Dimblock (28, 28, 1); // 2D block, n*n*1 threads
dim3 Dimgrid (((n - 1) / Dimblock.x) + 1, ((n - 1) / Dimblock.y) + 1, 1); // round up to cover the image; only 2 of the 3 dimensions are used
blurkernel <<<Dimgrid, Dimblock>>> (in, out, 28, 28); //Launch the kernel over the grid with all parameters
cudaDeviceSynchronize();// synchronize: wait for the GPU kernel to finish
const double tElapsed = GetTimer() / 1000.0;
totalTime += tElapsed;
}
for(int col =0;col < w ;col++)
{
for(int row =0;row < h ;row++)
{
outFile.write((char*)&out[col*w+ row], sizeof(out[col*w+ row]));
}
}
cout <<totalTime << " ";
return 0;
}
|
06eba86922cbd1de3e2cfd12ca8c93017aae20b3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <assert.h>
#include "ffnn.cuh"
#include "nn/loss.cuh"
#include "nn_param.cuh"
#include "nn/nn_layer.cuh"
#include "nn/cuda_utils.cuh"
#include "nn/activations.cuh"
ffnn * ffnn_init() {
ffnn * nn = (ffnn *)malloc(sizeof(ffnn));
if (!nn) { printf("Unable to initialize ffnn\n"); return NULL; }
int l;
for (l = 0; l < NUM_LAYERS; l++) {
nn->layer[l] = (nnlayer *)malloc(sizeof(nnlayer));
}
nn->Y = matrix_init(BATCH_SIZE, NUM_OUTPUTS);
nn->dY = matrix_init(BATCH_SIZE, NUM_OUTPUTS);
matrix_allocate(nn->Y);
matrix_allocate(nn->dY);
return nn;
}
int ffnn_free(ffnn * nn) {
if (!nn) { printf("Unable to initialize ffnn\n"); return -1; }
int l;
for (l = 0; l < NUM_LAYERS; l++) {
int n = nnl_free(nn->layer[l]);
if (n) return -1;
}
matrix_free(nn->Y);
matrix_free(nn->dY);
free(nn);
return 0;
}
void add_layer(ffnn * nn, int l, int Wx, int Wy, char f) {
if (!nn) { printf("Unable to initialize ffnn\n"); exit(1); }
nn->layer[l] = nnl_init(l, Wx, Wy, f);
}
// Forward pass (fp) through layers of the neural net
Matrix * ffnn_fp_global(ffnn * nn, Matrix * X) {
nn->layer[0]->A = X;
// Forward Pass though layers
int l;
for (l = 0; l < NUM_LAYERS-1; l++) {
nn->layer[l]->Z = nnl_forward_pass_global(nn->layer[l], nn->layer[l]->A);
switch (nn->layer[l]->f) { // activation of current hidden layer
// activate Z of cur layer and forward pass as input of next layer
case 'r': relu_forward_pass_global(nn->layer[l+1]->A, nn->layer[l]->Z); break;
case 's': sigmoid_forward_pass_global(nn->layer[l+1]->A, nn->layer[l]->Z); break;
default: break;
}
}
// Forward Pass through output layer to get Prediction matrix Y
nn->layer[NUM_LAYERS-1]->Z = nnl_forward_pass_global(nn->layer[NUM_LAYERS-1], nn->layer[NUM_LAYERS-1]->A);
switch (nn->layer[NUM_LAYERS-1]->f) { // activation of Output layer
case 'r': relu_forward_pass_global(nn->Y, nn->layer[NUM_LAYERS-1]->Z); break;
case 's': sigmoid_forward_pass_global(nn->Y, nn->layer[NUM_LAYERS-1]->Z); break;
default: break;
}
return nn->Y;
}
// back propagation (bp) through the layers of the neural net
void ffnn_bp_global(ffnn * nn, Matrix * Y_pred, Matrix * Y_truth, data_t lr) {
dBCEloss(nn->dY, Y_pred, Y_truth);
// uncomment below to see loss at each epoch for each batch, recommended Batch-size =10, #epoch = 3 to 5
// printf("Loss\n");
// print_matrix_d(nn->dY);
// printf("\n");
// Back Prop through output layer
switch (nn->layer[NUM_LAYERS-1]->f) {
case 'r': relu_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->dY, lr); break;
case 's': sigmoid_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->dY, lr); break;
default: break;
}
nnl_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->layer[NUM_LAYERS-1]->dZ, lr);
// uncomment below to see weight and bias updates for last layer
// printf("Weights\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->W);
// printf("\n");
// printf("bias\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->b);
// printf("\n");
// Back propagate through hidden linear
int l;
for (l = NUM_LAYERS-1; l > 0; l--) {
switch (nn->layer[l]->f) { // activation of current hidden layer
case 'r': relu_back_propagation_global(nn->layer[l-1], nn->layer[l]->dA, lr); break;
case 's': sigmoid_back_propagation_global(nn->layer[l-1], nn->layer[l]->dA, lr); break;
default: break;
}
nnl_back_propagation_global(nn->layer[l-1], nn->layer[l-1]->dZ, lr);
// uncomment below to see weight and bias updates for all layers (Not recommended)
// printf("Weights\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->W);
// printf("\n");
// printf("bias\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->b);
// printf("\n");
}
// need device synchronize because the next iteration's forward-pass input depends on
// the output of this iteration's back propagation
hipDeviceSynchronize();
}
data_t * get_prediction(Matrix * Y_batch) {
if (!Y_batch) return NULL;
data_t * y_label = (data_t *)calloc(BATCH_SIZE, sizeof(data_t));
int i, j;
for (i = 0; i < BATCH_SIZE; i++) {
data_t max = Y_batch->data_h[i*NUM_OUTPUTS];
for (j = 0; j < NUM_OUTPUTS; j++) {
data_t e = Y_batch->data_h[i*NUM_OUTPUTS+j];
if (e >= max) {
max = e;
y_label[i] = j;
}
}
}
return y_label;
}
data_t get_accuracy(Matrix * Y_pred, Matrix * Y_truth) {
data_t * Y_label_tr = get_prediction(Y_truth);
copy_matrix_D2H(Y_pred);
data_t * Y_label_pr = get_prediction(Y_pred);
int acc = 0, i;
for (i = 0; i < BATCH_SIZE; i++) {
//printf("Pred: %d, Truth: %d\n", Y_label_pr[i], Y_label_tr[i]);
if (Y_label_tr[i] == Y_label_pr[i]) acc++;
}
return (data_t)acc/BATCH_SIZE;
}
data_t compute_accuracy(data_t * Y_tr, data_t * Y_pr) {
int acc = 0, i;
for (i = 0; i < BATCH_SIZE; i++)
if (Y_tr[i] == Y_pr[i]) acc++;
// printf("acc: %d\n", acc);
return ((data_t)acc/(data_t)BATCH_SIZE);
}
|
06eba86922cbd1de3e2cfd12ca8c93017aae20b3.cu
|
#include <stdio.h>
#include <assert.h>
#include "ffnn.cuh"
#include "nn/loss.cuh"
#include "nn_param.cuh"
#include "nn/nn_layer.cuh"
#include "nn/cuda_utils.cuh"
#include "nn/activations.cuh"
ffnn * ffnn_init() {
ffnn * nn = (ffnn *)malloc(sizeof(ffnn));
if (!nn) { printf("Unable to initialize ffnn\n"); return NULL; }
int l;
for (l = 0; l < NUM_LAYERS; l++) {
nn->layer[l] = (nnlayer *)malloc(sizeof(nnlayer));
}
nn->Y = matrix_init(BATCH_SIZE, NUM_OUTPUTS);
nn->dY = matrix_init(BATCH_SIZE, NUM_OUTPUTS);
matrix_allocate(nn->Y);
matrix_allocate(nn->dY);
return nn;
}
int ffnn_free(ffnn * nn) {
if (!nn) { printf("Unable to initialize ffnn\n"); return -1; }
int l;
for (l = 0; l < NUM_LAYERS; l++) {
int n = nnl_free(nn->layer[l]);
if (n) return -1;
}
matrix_free(nn->Y);
matrix_free(nn->dY);
free(nn);
return 0;
}
void add_layer(ffnn * nn, int l, int Wx, int Wy, char f) {
if (!nn) { printf("Unable to initialize ffnn\n"); exit(1); }
nn->layer[l] = nnl_init(l, Wx, Wy, f);
}
// Forward pass (fp) through layers of the neural net
Matrix * ffnn_fp_global(ffnn * nn, Matrix * X) {
nn->layer[0]->A = X;
// Forward Pass though layers
int l;
for (l = 0; l < NUM_LAYERS-1; l++) {
nn->layer[l]->Z = nnl_forward_pass_global(nn->layer[l], nn->layer[l]->A);
switch (nn->layer[l]->f) { // activation of current hidden layer
// activate Z of cur layer and forward pass as input of next layer
case 'r': relu_forward_pass_global(nn->layer[l+1]->A, nn->layer[l]->Z); break;
case 's': sigmoid_forward_pass_global(nn->layer[l+1]->A, nn->layer[l]->Z); break;
default: break;
}
}
// Forward Pass through output layer to get Prediction matrix Y
nn->layer[NUM_LAYERS-1]->Z = nnl_forward_pass_global(nn->layer[NUM_LAYERS-1], nn->layer[NUM_LAYERS-1]->A);
switch (nn->layer[NUM_LAYERS-1]->f) { // activation of Output layer
case 'r': relu_forward_pass_global(nn->Y, nn->layer[NUM_LAYERS-1]->Z); break;
case 's': sigmoid_forward_pass_global(nn->Y, nn->layer[NUM_LAYERS-1]->Z); break;
default: break;
}
return nn->Y;
}
// back propagation (bp) through the layers of the neural net
void ffnn_bp_global(ffnn * nn, Matrix * Y_pred, Matrix * Y_truth, data_t lr) {
dBCEloss(nn->dY, Y_pred, Y_truth);
// uncomment below to see loss at each epoch for each batch, recommended Batch-size =10, #epoch = 3 to 5
// printf("Loss\n");
// print_matrix_d(nn->dY);
// printf("\n");
// Back Prop through output layer
switch (nn->layer[NUM_LAYERS-1]->f) {
case 'r': relu_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->dY, lr); break;
case 's': sigmoid_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->dY, lr); break;
default: break;
}
nnl_back_propagation_global(nn->layer[NUM_LAYERS-1], nn->layer[NUM_LAYERS-1]->dZ, lr);
// uncomment below to see weight and bias updates for last layer
// printf("Weights\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->W);
// printf("\n");
// printf("bias\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->b);
// printf("\n");
// Back propagate through hidden linear
int l;
for (l = NUM_LAYERS-1; l > 0; l--) {
switch (nn->layer[l]->f) { // activation of current hidden layer
case 'r': relu_back_propagation_global(nn->layer[l-1], nn->layer[l]->dA, lr); break;
case 's': sigmoid_back_propagation_global(nn->layer[l-1], nn->layer[l]->dA, lr); break;
default: break;
}
nnl_back_propagation_global(nn->layer[l-1], nn->layer[l-1]->dZ, lr);
// uncomment below to see weight and bias updates for all layers (Not recommended)
// printf("Weights\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->W);
// printf("\n");
// printf("bias\n");
// print_matrix_d(nn->layer[NUM_LAYERS-1]->b);
// printf("\n");
}
// need device synchronize because the next iteration's forward-pass input depends on
// the output of this iteration's back propagation
cudaDeviceSynchronize();
}
data_t * get_prediction(Matrix * Y_batch) {
if (!Y_batch) return NULL;
data_t * y_label = (data_t *)calloc(BATCH_SIZE, sizeof(data_t));
int i, j;
for (i = 0; i < BATCH_SIZE; i++) {
data_t max = Y_batch->data_h[i*NUM_OUTPUTS];
for (j = 0; j < NUM_OUTPUTS; j++) {
data_t e = Y_batch->data_h[i*NUM_OUTPUTS+j];
if (e >= max) {
max = e;
y_label[i] = j;
}
}
}
return y_label;
}
data_t get_accuracy(Matrix * Y_pred, Matrix * Y_truth) {
data_t * Y_label_tr = get_prediction(Y_truth);
copy_matrix_D2H(Y_pred);
data_t * Y_label_pr = get_prediction(Y_pred);
int acc = 0, i;
for (i = 0; i < BATCH_SIZE; i++) {
//printf("Pred: %d, Truth: %d\n", Y_label_pr[i], Y_label_tr[i]);
if (Y_label_tr[i] == Y_label_pr[i]) acc++;
}
return (data_t)acc/BATCH_SIZE;
}
data_t compute_accuracy(data_t * Y_tr, data_t * Y_pr) {
int acc = 0, i;
for (i = 0; i < BATCH_SIZE; i++)
if (Y_tr[i] == Y_pr[i]) acc++;
// printf("acc: %d\n", acc);
return ((data_t)acc/(data_t)BATCH_SIZE);
}
|
09b0067bd968e7a63e5f40cded5b892c830e2efb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <utility>
#include <vector>
#include "caffe/layers/argmax_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArgMaxForward(const int n, const int axis_dist,
const int dim, const bool has_axis, const bool out_max_val, const Dtype* bottom_data,
Dtype* top_data) {
CUDA_KERNEL_LOOP(i, n) {
int max_index = 0;
Dtype max_val = bottom_data[(i / axis_dist * dim) * axis_dist + i % axis_dist];
for (int j = 0; j < dim; ++j) {
Dtype curr_val = bottom_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist];
if (curr_val > max_val) {
max_val = curr_val;
max_index = j;
}
}
if (out_max_val) {
if (has_axis) {
// Produces max_val per axis
int index = (i / axis_dist) * axis_dist + i % axis_dist;
top_data[index]= max_val;
} else {
// Produces max_ind and max_val
top_data[2 * i] = Dtype(max_index);
top_data[2 * i + 1] = max_val;
}
} else {
// Produces max_ind per axis
int index = (i / axis_dist) * axis_dist + i % axis_dist;
top_data[index] = Dtype(max_index);
}
}
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int dim, axis_dist;
if (has_axis_) {
dim = bottom[0]->shape(axis_);
// Distance between values of axis in blob
axis_dist = bottom[0]->count(axis_) / dim;
} else {
dim = bottom[0]->count(1);
axis_dist = 1;
}
int num = bottom[0]->count() / dim;
if (top_k_ == 1) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ArgMaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(num)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num, axis_dist, dim, has_axis_, out_max_val_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
std::vector<std::pair<Dtype, int> > bottom_data_vector(dim);
for (int i = 0; i < num; ++i) {
for (int j = 0; j < dim; ++j) {
bottom_data_vector[j] = std::make_pair(
bottom_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
}
std::partial_sort(
bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
for (int j = 0; j < top_k_; ++j) {
if (out_max_val_) {
if (has_axis_) {
// Produces max_val per axis
top_data[(i / axis_dist * top_k_ + j) * axis_dist + i % axis_dist]
= bottom_data_vector[j].first;
} else {
// Produces max_ind and max_val
top_data[2 * i * top_k_ + j] = bottom_data_vector[j].second;
top_data[2 * i * top_k_ + top_k_ + j] = bottom_data_vector[j].first;
}
} else {
// Produces max_ind per axis
top_data[(i / axis_dist * top_k_ + j) * axis_dist + i % axis_dist]
= bottom_data_vector[j].second;
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FORWARD(ArgMaxLayer);
} // namespace caffe
|
09b0067bd968e7a63e5f40cded5b892c830e2efb.cu
|
#include <algorithm>
#include <functional>
#include <utility>
#include <vector>
#include "caffe/layers/argmax_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void ArgMaxForward(const int n, const int axis_dist,
const int dim, const bool has_axis, const bool out_max_val, const Dtype* bottom_data,
Dtype* top_data) {
CUDA_KERNEL_LOOP(i, n) {
int max_index = 0;
Dtype max_val = bottom_data[(i / axis_dist * dim) * axis_dist + i % axis_dist];
for (int j = 0; j < dim; ++j) {
Dtype curr_val = bottom_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist];
if (curr_val > max_val) {
max_val = curr_val;
max_index = j;
}
}
if (out_max_val) {
if (has_axis) {
// Produces max_val per axis
int index = (i / axis_dist) * axis_dist + i % axis_dist;
top_data[index]= max_val;
} else {
// Produces max_ind and max_val
top_data[2 * i] = Dtype(max_index);
top_data[2 * i + 1] = max_val;
}
} else {
// Produces max_ind per axis
int index = (i / axis_dist) * axis_dist + i % axis_dist;
top_data[index] = Dtype(max_index);
}
}
}
template <typename Dtype>
void ArgMaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
int dim, axis_dist;
if (has_axis_) {
dim = bottom[0]->shape(axis_);
// Distance between values of axis in blob
axis_dist = bottom[0]->count(axis_) / dim;
} else {
dim = bottom[0]->count(1);
axis_dist = 1;
}
int num = bottom[0]->count() / dim;
if (top_k_ == 1) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
ArgMaxForward<Dtype><<<CAFFE_GET_BLOCKS(num), CAFFE_CUDA_NUM_THREADS>>>(
num, axis_dist, dim, has_axis_, out_max_val_, bottom_data, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
const Dtype* bottom_data = bottom[0]->cpu_data();
Dtype* top_data = top[0]->mutable_cpu_data();
std::vector<std::pair<Dtype, int> > bottom_data_vector(dim);
for (int i = 0; i < num; ++i) {
for (int j = 0; j < dim; ++j) {
bottom_data_vector[j] = std::make_pair(
bottom_data[(i / axis_dist * dim + j) * axis_dist + i % axis_dist], j);
}
std::partial_sort(
bottom_data_vector.begin(), bottom_data_vector.begin() + top_k_,
bottom_data_vector.end(), std::greater<std::pair<Dtype, int> >());
for (int j = 0; j < top_k_; ++j) {
if (out_max_val_) {
if (has_axis_) {
// Produces max_val per axis
top_data[(i / axis_dist * top_k_ + j) * axis_dist + i % axis_dist]
= bottom_data_vector[j].first;
} else {
// Produces max_ind and max_val
top_data[2 * i * top_k_ + j] = bottom_data_vector[j].second;
top_data[2 * i * top_k_ + top_k_ + j] = bottom_data_vector[j].first;
}
} else {
// Produces max_ind per axis
top_data[(i / axis_dist * top_k_ + j) * axis_dist + i % axis_dist]
= bottom_data_vector[j].second;
}
}
}
}
}
INSTANTIATE_LAYER_GPU_FORWARD(ArgMaxLayer);
} // namespace caffe
|
7eebde1b437e35cae2260a4601c68db7c1ad71a0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reduction.h"
namespace cg = cooperative_groups;
__device__ double getMax(double x, double y) {
return (x > y) ? x : y;
}
__device__ double getSum(double x, double y) {
return x + y;
}
__global__ void reduceKernelMax(double *g_idata, double *g_odata, unsigned int n)
{
cg::thread_block cta = cg::this_thread_block();
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
cg::sync(cta);
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = getMax(sdata[tid], sdata[tid + s]);
}
cg::sync(cta);
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduceKernelSum(double *g_idata, double *g_odata, unsigned int n)
{
cg::thread_block cta = cg::this_thread_block();
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
cg::sync(cta);
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = getSum(sdata[tid], sdata[tid + s]);
}
cg::sync(cta);
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
void reduce(int type, int size, int threads, int blocks, double *d_idata, double *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(double);
switch (type)
{
case MAXIMUM:
hipLaunchKernelGGL(( reduceKernelMax), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
case SUMMATION:
hipLaunchKernelGGL(( reduceKernelSum), dim3(dimGrid), dim3(dimBlock), smemSize, 0, d_idata, d_odata, size);
break;
default:
break;
}
}
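/* Added usage note (not in the original file): each launch of reduce() leaves one partial
   result per block in d_odata, so a complete reduction normally runs a second pass over
   those partials, e.g. (the names d_partial/d_result and threadsPow2 are illustrative only):
   reduce(SUMMATION, blocks, threadsPow2, 1, d_partial, d_result); // single block finishes the sum
   threadsPow2 should be a power of two >= blocks, because the in-kernel loop halves s each
   step and would drop elements for other block sizes; out-of-range threads load 0, which is
   safe for SUMMATION but needs care with MAXIMUM over negative inputs. */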
|
7eebde1b437e35cae2260a4601c68db7c1ad71a0.cu
|
#include "reduction.h"
namespace cg = cooperative_groups;
__device__ double getMax(double x, double y) {
return (x > y) ? x : y;
}
__device__ double getSum(double x, double y) {
return x + y;
}
__global__ void reduceKernelMax(double *g_idata, double *g_odata, unsigned int n)
{
cg::thread_block cta = cg::this_thread_block();
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
cg::sync(cta);
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = getMax(sdata[tid], sdata[tid + s]);
}
cg::sync(cta);
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
__global__ void reduceKernelSum(double *g_idata, double *g_odata, unsigned int n)
{
cg::thread_block cta = cg::this_thread_block();
extern __shared__ double sdata[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
cg::sync(cta);
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] = getSum(sdata[tid], sdata[tid + s]);
}
cg::sync(cta);
}
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
void reduce(int type, int size, int threads, int blocks, double *d_idata, double *d_odata)
{
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
int smemSize = threads * sizeof(double);
switch (type)
{
case MAXIMUM:
reduceKernelMax<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
case SUMMATION:
reduceKernelSum<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, size);
break;
default:
break;
}
}
|
c351efddd1b803533132ab4f0f56c13848142fe8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_init_kernel;
int xdim0_init_kernel_h = -1;
__constant__ int ydim0_init_kernel;
int ydim0_init_kernel_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y,z) (x+xdim0_init_kernel*(y)+xdim0_init_kernel*ydim0_init_kernel*(z))
//user function
__device__
void init_kernel_gpu(double *val, int *idx){
if(idx[0]==0 || idx[0]==nx-1 || idx[1]==0 || idx[1]==ny-1 || idx[2]==0 || idx[2]==nz-1)
val[OPS_ACC0(0,0,0)] = 1.0;
else
val[OPS_ACC0(0,0,0)] = 0.0;
}
#undef OPS_ACC0
__global__ void ops_init_kernel(
double* __restrict arg0,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0+idx_x;
arg_idx[1] = arg_idx1+idx_y;
arg_idx[2] = arg_idx2+idx_z;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_init_kernel + idx_z * 1*1 * xdim0_init_kernel * ydim0_init_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
init_kernel_gpu(arg0, arg_idx);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_init_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,0)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0,"init_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int arg_idx[3];
#ifdef OPS_MPI
#ifdef OPS_LAZY
ops_block block = desc->block;
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
arg_idx[2] = sb->decomp_disp[2]+start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != xdim0_init_kernel_h || ydim0 != ydim0_init_kernel_h) {
hipMemcpyToSymbol( xdim0_init_kernel, &xdim0, sizeof(int) );
xdim0_init_kernel_h = xdim0;
hipMemcpyToSymbol( ydim0_init_kernel, &ydim0, sizeof(int) );
ydim0_init_kernel_h = ydim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_init_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[0].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 0;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 0;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->function = ops_par_loop_init_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(0,"init_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
c351efddd1b803533132ab4f0f56c13848142fe8.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_init_kernel;
int xdim0_init_kernel_h = -1;
__constant__ int ydim0_init_kernel;
int ydim0_init_kernel_h = -1;
#undef OPS_ACC0
#define OPS_ACC0(x,y,z) (x+xdim0_init_kernel*(y)+xdim0_init_kernel*ydim0_init_kernel*(z))
//user function
__device__
void init_kernel_gpu(double *val, int *idx){
if(idx[0]==0 || idx[0]==nx-1 || idx[1]==0 || idx[1]==ny-1 || idx[2]==0 || idx[2]==nz-1)
val[OPS_ACC0(0,0,0)] = 1.0;
else
val[OPS_ACC0(0,0,0)] = 0.0;
}
#undef OPS_ACC0
__global__ void ops_init_kernel(
double* __restrict arg0,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0+idx_x;
arg_idx[1] = arg_idx1+idx_y;
arg_idx[2] = arg_idx2+idx_z;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_init_kernel + idx_z * 1*1 * xdim0_init_kernel * ydim0_init_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
init_kernel_gpu(arg0, arg_idx);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
#else
void ops_par_loop_init_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[2] = { arg0, arg1};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,2,range,0)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0,"init_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int arg_idx[3];
#ifdef OPS_MPI
#ifdef OPS_LAZY
ops_block block = desc->block;
sub_block_list sb = OPS_sub_block_list[block->index];
#endif
arg_idx[0] = sb->decomp_disp[0]+start[0];
arg_idx[1] = sb->decomp_disp[1]+start[1];
arg_idx[2] = sb->decomp_disp[2]+start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
if (xdim0 != xdim0_init_kernel_h || ydim0 != ydim0_init_kernel_h) {
cudaMemcpyToSymbol( xdim0_init_kernel, &xdim0, sizeof(int) );
xdim0_init_kernel_h = xdim0;
cudaMemcpyToSymbol( ydim0_init_kernel, &ydim0, sizeof(int) );
ydim0_init_kernel_h = ydim0;
}
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
char *p_a[2];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 2);
ops_halo_exchanges(args,2,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_init_kernel<<<grid, tblock >>> ( (double *)p_a[0], arg_idx[0], arg_idx[1], arg_idx[2],x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[0].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 2);
ops_set_halo_dirtybit3(&args[0],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[0].mpi_time += t2-t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
}
}
#ifdef OPS_LAZY
void ops_par_loop_init_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 0;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 0;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 2;
desc->args = (ops_arg*)malloc(2*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->function = ops_par_loop_init_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(0,"init_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
80675b2dbe76a47efc52e9d0de129e29e75e0f82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel.h"
#include "helpers.cu"
#include "vector_tensor.cu"
#include "cuda_struct.h"
#include "constant.h"
#include "FFxtubes.h"
#define BWDSIDET
#define LONGITUDINAL
#define SQRTNT 1
// TO DO:
// Line 1420:
// Yes, very much a waste. The edge positions should be calculated from the vertex positions, we can
// load flags to determine if it is an insulator-crossing triangle and that is the proper way to handle that.
#define FOUR_PI 12.5663706143592
#define TEST (0) //iVertex == VERTCHOSEN)
#define TEST_ELEC_VISC_TRI (0) //iMinor == CHOSEN)
#define TESTNEUTVISC2 (0) // iMinor == CHOSEN)
#define TESTPRESSUREY (0) //iVertex == VERTCHOSEN)
#define TEST_T (0)
#define TEST3 (0)
#define TEST1 (0)
#define TESTTRI (0) // thermal pressure output & infer minor density & momflux_minor
#define TESTADVECT (0)
#define TESTADVECTZ (0)//iVertex == VERTCHOSEN)
#define TESTADVECTNEUT (0) //iVertex == VERTCHOSEN)
#define TESTIONVERTVISC (0)//(iVertex == VERTCHOSEN)
#define TESTNEUTVISC (0) // iVertex == VERTCHOSEN)
#define TESTVISC (0) //iMinor == CHOSEN)
#define TESTIONVISC (0)
#define TESTHEAT (0)
#define TESTHEATFULL (0)
#define TESTHEAT1 (0)
#define TESTTRI2 (0)
#define TESTTRI3 (0)
#define TESTHEAT2 (0)
#define TESTIONISE (0)
#define TESTOHMS (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_IONIZE (0) //iVertex == VERTCHOSEN)
#define TESTACCEL (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TESTACCEL2 (0) //iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN)
#define TESTACCEL_X (0) // PopOhms output
#define TESTLAP (0)
#define TESTLAP2 (0) //(iMinor == CHOSEN1) || (iMinor == CHOSEN2))
#define TESTVEZ (0)//iMinor == CHOSEN)
#define TEST_VS_MATRIX (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_VS_MATRIX2 (0) // iVertex == VERTCHOSEN
#define TESTVNX (0)
#define TESTVNY (0) //iMinor == CHOSEN)//PopOhms
#define TESTVNY2 (0) // iMinor == CHOSEN) //neutral momflux
#define TESTVNY3 (0)// || (iVertex == VERTCHOSEN2))
#define TESTVNZ (0)//iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_ADV_HEAT_FLAG 0
#define TEST_ADV_MASS_FLAG 0
#define TESTVNXVERT (0)
#define TESTVNYVERT (0)
#define TEST_ACCEL_Y (0) // iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define VISCMAG 1
#define MIDPT_A
#define TEST_ACCEL_EZ (0)//iMinor == CHOSEN)
#define TEST_EPSILON_Y (0)
#define TEST_EPSILON_X (0)
#define TEST_EPSILON_Y_IMINOR (0)//iMinor == lChosen)
#define TEST_EPSILON_X_MINOR (0) // iMinor == CHOSEN)
#define ARTIFICIAL_RELATIVE_THRESH 1.0e10 // if we let it be more strict than heat thresh then it drives a difference generating heat!
#define ARTIFICIAL_RELATIVE_THRESH_HEAT 1.0e10 // typical initial density is 1e8 vs 1e18
#define LOW_THRESH_FOR_VISC_CALCS 1.0e10 // density. This should not be too much greater than the density where we do not soak away velocity and heat. At the moment it's 100 times.
#define MINIMUM_NU_EI_DENSITY 1.0e12
// Try excluding only if both sides are at this density -- it just doesn't matter.
// Just exclude to/from anything this sparse. It doesn't matter.
// Heat is the one that can be a problem as soaking away heat means that we never can grow our ionization level - we're basically stuck at 0
// unless a giant wave of ions sweeps in. We start at low ionization.. 1e8/1e18 = 1e-10.
// Change log. 090419: Change upwind density routine to just use n from the lowest cell that is upwind for at least 1 side.
// 230419: Change nu_n used in kappa_neut to be a lc of collision frequencies.
// 250419: Change to use min(ita_ours, ita_theirs). Maybe need to do same for kappa_par.
// Change to apportion visc heat from tri per N.
//const int Chosens[7] = { 25454, 86529, 25453, 86381, 25455, 86530, 25750 };
__device__ f64 ArtificialUpliftFactor(f64 n_i, f64 n_n)
{
// Used in ionization uplift and in heat inter-species transfer.
// At n_i = 1e9, nn 1e14 we want nn equiv 1e20 so 1e6 uplift
// We do not care much about small amt of neutrals as much as small amt of ions.
if (n_i + n_n > 1.0e15) return 1.0;
f64 t = (n_n*1.0e15 + n_i*(n_i + n_n)) / ((n_i + n_n)*(n_i + n_n));
return min(t*t,1.0e6);
// Having to boost up when < 1e15 because our dodgy point has > 1e14.
}
__device__ f64 ArtificialUpliftFactor_MT(f64 n_i, f64 n_n)
{
if (n_i > 1.0e13) return 1.0;
// Used in crushing v to be hydrodynamic and in viscous ita.
f64 additional_nn = min(exp(-n_i*n_i/0.5e24)*(1.0e30 / (n_i)), 1.0e20); // high effective density to produce hydrodynamics
// n <= 1e10 : additional_nn ~= 1e20
// n == 1e11 : additional_nn ~= 1e19
// n == 1e12 : additional_nn ~= 1e17
// n == 1e13 : additional_nn ~= 1e-70
return 1.0 + additional_nn /n_n;
}
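// Added worked check (illustrative) of the magnitudes quoted above, for n_i = 1e11, n_n = 1e14:
// additional_nn = min( exp(-(1e11)^2 / 0.5e24) * 1e30 / 1e11, 1e20 )
// = min( exp(-0.02) * 1e19, 1e20 ) ~= 9.8e18,
// so the returned factor is 1 + 9.8e18 / 1e14 ~= 1e5: the sparse ion fluid behaves as if it
// sat in a much denser neutral background, which is what pushes it towards hydrodynamics.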
__device__ __forceinline__ void CalculateCircumcenter(f64_vec2 * p_cc, f64_vec2 poscorner0, f64_vec2 poscorner1, f64_vec2 poscorner2)
{
f64_vec2 Bb = poscorner1 - poscorner0;
f64_vec2 C = poscorner2 - poscorner0;
f64 D = 2.0*(Bb.x*C.y - Bb.y*C.x);
f64 modB = Bb.x*Bb.x + Bb.y*Bb.y;
f64 modC = C.x*C.x + C.y*C.y;
p_cc->x = (C.y*modB - Bb.y*modC) / D + poscorner0.x;
p_cc->y = (Bb.x*modC - C.x*modB) / D + poscorner0.y;
// formula agrees with wikipedia so why does it give a stupid result.
}
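// Added worked check (illustrative): for corners (0,0), (2,0), (0,2) we get Bb = (2,0), C = (0,2),
// D = 2*(2*2 - 0*0) = 8, modB = modC = 4, so cc = ((2*4 - 0*4)/8, (2*4 - 0*4)/8) + (0,0) = (1,1),
// the midpoint of the hypotenuse, as expected for a right triangle.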
__device__ __forceinline__ bool TestDomainPos(f64_vec2 pos)
{
return (
(pos.x*pos.x + pos.y*pos.y > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
&&
(pos.x*pos.x + (pos.y - CATHODE_ROD_R_POSITION)*(pos.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
);
}
__device__ f64 GetRecombinationRate_given_v(f64 const Te, int i_v)
{
f64 const TeeV = Te / kB;
f64 const Tesq = TeeV*TeeV;
f64 const Te3 = Tesq*TeeV;
f64 rate, rate1, rate2;
if (Te > 4.75e-11) return 0.0;
if (Te < 1.875e-12) { // Magic numbers!!
rate = (recomb_coeffs[i_v][0][4] + recomb_coeffs[i_v][0][3] * TeeV
+ recomb_coeffs[i_v][0][2] * Tesq + recomb_coeffs[i_v][0][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][0][0] * TeeV);
} else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][0][0];
};
} else {
if (Te < 2.25e-12) {
rate1 = (recomb_coeffs[i_v][0][4] + recomb_coeffs[i_v][0][3] * TeeV
+ recomb_coeffs[i_v][0][2] * Tesq + recomb_coeffs[i_v][0][1] * Te3);
rate2 = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
if (i_v < 7) {
rate1 /= (1.0 - recomb_coeffs[i_v][0][0] * TeeV);
rate2 /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
}
else {
rate1 += exp(-TeeV*0.5)*recomb_coeffs[i_v][0][0];
rate2 += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
};
f64 ppn_high = (Te - 1.875e-12) / (2.25e-12 - 1.875e-12);
f64 ppn_low = (2.25e-12 - Te) / (2.25e-12-1.875e-12);
rate = rate1*ppn_low + rate2*ppn_high;
} else {
if (Te < 1.05e-11) {
rate = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
}
else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
};
} else {
if (Te < 1.0875e-11) {
rate1 = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
rate2 = (recomb_coeffs[i_v][2][4] + recomb_coeffs[i_v][2][3] * TeeV
+ recomb_coeffs[i_v][2][2] * Tesq + recomb_coeffs[i_v][2][1] * Te3);
if (i_v < 7) {
rate1 /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
rate2 /= (1.0 - recomb_coeffs[i_v][2][0] * TeeV);
} else {
rate1 += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
rate2 += exp(-TeeV*0.5)*recomb_coeffs[i_v][2][0];
};
f64 ppn_high = (Te - 1.05e-11) / (1.0875e-11 - 1.05e-11);
f64 ppn_low = (1.0875e-11 - Te) / (1.0875e-11 - 1.05e-11);
rate = rate1*ppn_low + rate2*ppn_high;
} else {
rate = (recomb_coeffs[i_v][2][4] + recomb_coeffs[i_v][2][3] * TeeV
+ recomb_coeffs[i_v][2][2] * Tesq + recomb_coeffs[i_v][2][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][2][0] * TeeV);
} else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][2][0];
};
};
};
};
};
return rate;
}
__device__ f64 GetIonizationRate_given_v(f64 const Te, int i_v)
{
f64 TeeV = Te / kB;
if (Te > ionize_temps[i_v][9]) {
TeeV = ionize_temps[i_v][9] / kB;
}
f64 Tesq = TeeV*TeeV;
f64 Te3 = Tesq*TeeV;
f64 Te4 = Tesq*Tesq;
f64 rate, rate1, rate2;
bool b_exp[5];
memset(b_exp, 0, sizeof(bool) * 5);
if (i_v < 18) {
b_exp[0] = true; b_exp[1] = true; b_exp[2] = true;
};
if (i_v == 18) {
b_exp[0] = true; b_exp[1] = true;
}
//printf("i_v %d b_exp %d %d %d %d %d \n", i_v, (b_exp[0]) ? 1 : 0, (b_exp[1]) ? 1 : 0, (b_exp[2]) ? 1 : 0, (b_exp[3]) ? 1 : 0, (b_exp[4]) ? 1 : 0);
if (Te < ionize_temps[i_v][0]) {
if (i_v < 19) {
rate = 0.0;
} // let's say 18 is where we treat as over critical velocity.
else {
TeeV = ionize_temps[i_v][0] / kB; // return low end value
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
};
} else {
if (Te < ionize_temps[i_v][1]) {
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
if (b_exp[0]) rate = exp(rate);
}
else {
if (Te < ionize_temps[i_v][2]) {
rate1 = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
rate2 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[0]) rate1 = exp(rate1);
if (b_exp[1]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][1]) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
f64 ppn_low = (ionize_temps[i_v][2] - Te) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][3])
{
rate = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[1]) rate = exp(rate);
} else {
if (Te < ionize_temps[i_v][4]) {
rate1 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
rate2 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[1]) rate1 = exp(rate1);
if (b_exp[2]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][3]) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
f64 ppn_low = (ionize_temps[i_v][4] - Te) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][5]) {
rate = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[2]) rate = exp(rate);
}
else {
if (Te < ionize_temps[i_v][6]) {
rate1 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
rate2 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[2]) rate1 = exp(rate1);
if (b_exp[3]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][5]) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
f64 ppn_low = (ionize_temps[i_v][6] - Te) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][7]) {
rate = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate = exp(rate); // it is always false anyway
}
else {
if (Te < ionize_temps[i_v][8]) {
rate1 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
rate2 = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[3]) rate1 = exp(rate1); // it is always false anyway
if (b_exp[4]) rate2 = exp(rate2); // it is always false anyway
f64 ppn_high = (Te - ionize_temps[i_v][7]) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
f64 ppn_low = (ionize_temps[i_v][8] - Te) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
rate = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate = exp(rate); // it is always false anyway
};
}
};
};
};
};
};
};
};
return rate;
}
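// Added summary (descriptive, not in the original): the fit above is piecewise in Te using the
// ionize_temps[i_v][] breakpoints. Alternating intervals either evaluate one quartic in TeeV
// (exponentiated when that segment's b_exp flag is set, i.e. for the low-v columns) or blend the
// two neighbouring quartics linearly in Te, keeping the rate continuous across segment
// boundaries; below ionize_temps[i_v][0] the rate is 0, except for the highest-v columns
// (i_v >= 19), which clamp to the low-end fit.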
__device__ f64 GetIonizationRate_given_v_Debug(f64 const Te, int i_v)
{
f64 TeeV = Te / kB;
if (Te > ionize_temps[i_v][9]) {
TeeV = ionize_temps[i_v][9] / kB;
}
f64 Tesq = TeeV*TeeV;
f64 Te3 = Tesq*TeeV;
f64 Te4 = Tesq*Tesq;
f64 rate, rate1, rate2;
bool b_exp[5];
memset(b_exp, 0, sizeof(bool) * 5);
if (i_v < 18) {
b_exp[0] = true; b_exp[1] = true; b_exp[2] = true;
};
if (i_v == 18) {
b_exp[0] = true; b_exp[1] = true;
}
printf("i_v %d b_exp %d %d %d %d %d \n", i_v, (b_exp[0]) ? 1 : 0, (b_exp[1]) ? 1 : 0, (b_exp[2]) ? 1 : 0, (b_exp[3]) ? 1 : 0, (b_exp[4]) ? 1 : 0);
if (Te < ionize_temps[i_v][0]) {
if (i_v < 19) {
rate = 0.0;
printf("Te %1.12E was below %1.12E and we returned 0 ionization rate.\n", Te, ionize_temps[i_v][0]);
} // let's say 18 is where we treat as over critical velocity.
else {
TeeV = ionize_temps[i_v][0] / kB; // return low end value
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
printf("used low end rate: %1.9E \n", rate);
};
} else {
if (Te < ionize_temps[i_v][1]) {
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
if (b_exp[0]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[0] %d 0-1 rate %1.8E coeffs[0] \n", i_v, Te,
(b_exp[0] ? 1:0), rate, ionize_coeffs[i_v][0][0]);
} else {
if (Te < ionize_temps[i_v][2]) {
rate1 = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
rate2 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[0]) rate1 = exp(rate1);
if (b_exp[1]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][1]) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
f64 ppn_low = (ionize_temps[i_v][2] - Te) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[0] %d b_exp[1] %d 1-2 rate %1.8E coeffs[1][0] \n", i_v, Te,
(b_exp[0] ? 1 : 0), (b_exp[1] ? 1 : 0), rate, ionize_coeffs[i_v][1][0]);
} else {
if (Te < ionize_temps[i_v][3])
{
rate = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[1]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[0] %d b_exp[1] %d Temps2-3 = 1 rate %1.8E coeffs[1][0] \n", i_v, Te,
(b_exp[0] ? 1 : 0), (b_exp[1] ? 1 : 0), rate, ionize_coeffs[i_v][1][0]);
} else {
if (Te < ionize_temps[i_v][4]) {
rate1 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
rate2 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[1]) rate1 = exp(rate1);
if (b_exp[2]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][3]) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
f64 ppn_low = (ionize_temps[i_v][4] - Te) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[1] %d b_exp[2] %d 3-4 -> 1-2 rate %1.8E coeffs[2][0] \n", i_v, Te,
(b_exp[1] ? 1 : 0), (b_exp[2] ? 1 : 0), rate, ionize_coeffs[i_v][2][0]);
} else {
if (Te < ionize_temps[i_v][5]) {
rate = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[2]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[2] %d b_exp[3] %d 4-5 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[2] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][6]) {
rate1 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
rate2 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[2]) rate1 = exp(rate1);
if (b_exp[3]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][5]) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
f64 ppn_low = (ionize_temps[i_v][6] - Te) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[2] %d b_exp[3] %d 5-6-> 2-3 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[2] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][7]) {
rate = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate = exp(rate); // it is always false anyway
printf("i_v %d Te %1.8E b_exp[3] %d b_exp[3] %d 6-7 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[3] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][8]) {
rate1 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate1 = exp(rate1); // it is always false anyway
rate2 = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate2 = exp(rate2); // it is always false anyway
f64 ppn_high = (Te - ionize_temps[i_v][7]) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
f64 ppn_low = (ionize_temps[i_v][8] - Te) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[3] %d b_exp[4] %d 7-8 rate %1.8E coeffs[4][0] \n", i_v, Te,
(b_exp[3] ? 1 : 0), (b_exp[4] ? 1 : 0), rate, ionize_coeffs[i_v][4][0]);
} else {
rate = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate = exp(rate); // it is always false anyway
printf("i_v %d Te %1.8E b_exp[4] %d 8 -> 4 rate %1.8E coeffs[4][0] \n", i_v, Te,
(b_exp[4] ? 1 : 0), rate, ionize_coeffs[i_v][4][0]);
};
}
};
};
};
};
};
};
};
return rate;
}
__device__ f64 GetIonizationRates(f64 const Te, f64 const v, f64 * p_Recombo_rate)
{
int i_vleft, i_vright;
f64 vleft, vright;
f64 ppn_right, ppn_left;
if (v < 1.0e7) {
i_vleft = (int)(v / 2.0e6);
i_vright = i_vleft + 1;
vleft = 2.0e6*(double)(i_vleft);
vright = 2.0e6*(double)(i_vright); // at most 1e7
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
} else {
if (v > 2.7e8) {
i_vleft = 31;
i_vright = 31;
ppn_left = 1.0;
ppn_right = 0.0;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
} else {
			i_vleft = 4 + (int)(v / 1.0e7); // check?
if (i_vleft >= 31) i_vleft = 30;
i_vright = i_vleft + 1;
if (i_vright >= 32) i_vright = 31;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
};
};
f64 rate_left = GetIonizationRate_given_v(Te, i_vleft);
f64 rate_right = GetIonizationRate_given_v(Te, i_vright);
f64 recomb_rate_left = GetRecombinationRate_given_v(Te, i_vleft);
f64 recomb_rate_right = GetRecombinationRate_given_v(Te, i_vright);
// now we need to go again given which v column to another function!
f64 rate = rate_left*ppn_left + rate_right*ppn_right;
*p_Recombo_rate = recomb_rate_left*ppn_left + recomb_rate_right*ppn_right;
return rate;
}
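// --- Added commentary (illustrative sketch, not called by the solver) --------------------
// GetIonizationRates brackets v between two table columns (spacing 2.0e6 below v = 1.0e7,
// spacing 1.0e7 above with the column index offset by 4) and then blends the two column
// rates with the ppn_left/ppn_right weights. The bracketing step for a single uniformly
// spaced grid looks like the sketch below (hypothetical names, assumes nNodes >= 2).
static __device__ void Sketch_BracketUniformGrid(double v, double v0, double dv, int nNodes,
	int *piLeft, int *piRight, double *pppnRight)
{
	// Clamp to the grid, find the two bracketing nodes and the fraction of the way
	// from the left node to the right node.
	double s = (v - v0) / dv;
	if (s < 0.0) s = 0.0;
	if (s > (double)(nNodes - 1)) s = (double)(nNodes - 1);
	int iLeft = (int)s;
	if (iLeft > nNodes - 2) iLeft = nNodes - 2;
	*piLeft = iLeft;
	*piRight = iLeft + 1;
	*pppnRight = s - (double)iLeft;
}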
__device__ f64 GetIonizationRatesDebug(f64 const Te, f64 const v, f64 * p_Recombo_rate)
{
int i_vleft, i_vright;
f64 vleft, vright;
f64 ppn_right, ppn_left;
if (v < 1.0e7) {
i_vleft = (int)(v / 2.0e6);
i_vright = i_vleft + 1;
vleft = 2.0e6*(double)(i_vleft);
vright = 2.0e6*(double)(i_vright); // at most 1e7
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
printf("GIRD small v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
}
else {
if (v > 2.7e8) {
i_vleft = 31;
i_vright = 31;
ppn_left = 1.0;
ppn_right = 0.0;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
printf("GIRD high v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
}
else {
			i_vleft = 4 + (int)(v / 1.0e7); // check?
if (i_vleft >= 31) i_vleft = 30;
i_vright = i_vleft + 1;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
printf("GIRD moderate v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
};
};
f64 rate_left = GetIonizationRate_given_v_Debug(Te, i_vleft);
f64 rate_right = GetIonizationRate_given_v_Debug(Te, i_vright);
f64 recomb_rate_left = GetRecombinationRate_given_v(Te, i_vleft);
f64 recomb_rate_right = GetRecombinationRate_given_v(Te, i_vright);
printf("GIRD : rate_left %1.8E rate_right %1.8E \n", rate_left, rate_right);
// now we need to go again given which v column to another function!
f64 rate = rate_left*ppn_left + rate_right*ppn_right;
*p_Recombo_rate = recomb_rate_left*ppn_left + recomb_rate_right*ppn_right;
return rate;
}
__global__ void kernelCompare(
f64_vec2 * __restrict__ p_epsxy,
f64 * __restrict__ p_epsiz,
f64 * __restrict__ p_epsez,
f64_vec2 * __restrict__ p_epsxyp,
f64 * __restrict__ p_epsizp,
f64 * __restrict__ p_epsezp,
f64 * __restrict__ p_distance
)
{
long const index = blockDim.x*blockIdx.x + threadIdx.x;
f64_vec2 eps1 = p_epsxy[index];
f64_vec2 eps2 = p_epsxyp[index];
f64 diff = eps1.x - eps2.x;
f64 epsez = p_epsez[index];
f64 epsezp = p_epsezp[index];
diff = epsez - epsezp;
if (index == CHOSEN) printf("%d epsez %1.14E pred %1.14E diff %1.8E ppn %1.4E\n",
index, epsez, epsezp, diff, diff/epsez);
p_distance[index] = fabs(diff/(fabs(epsez)+1.0e+2));
/*if ((fabs(diff) > 1.0e-10) && (fabs(diff) > fabs(1.0e-8*eps2.x))) printf("%d x dimension : %1.10E %1.10E\n", index,
eps1.x, eps2.x);
diff = eps1.y - eps2.y;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(eps2.y))) printf("%d y dimension : %1.10E %1.10E \n", index,
eps1.y, eps2.y);
f64 epsiz = p_epsiz[index];
f64 epsizp = p_epsizp[index];
diff = epsiz - epsizp;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(epsizp))) printf("%d iz : %1.10E %1.10E \n", index,
epsiz, epsizp);
f64 epsez = p_epsez[index];
f64 epsezp = p_epsezp[index];
diff = epsez - epsezp;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(epsezp))) printf("%d ez : %1.10E %1.10E \n", index,
epsez, epsezp);*/
	// Write the (unchanged) values back so the compiler cannot optimise the whole routine away:
p_epsxy[index] = eps1;
p_epsez[index] = epsez;
}
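// --- Added commentary (illustrative sketch, not called by the solver) --------------------
// kernelCompare reduces each pair of epsilon values to a relative distance
// |eps - eps_predicted| / (|eps| + 1.0e+2); the additive floor keeps the ratio finite
// where eps itself is tiny. A scalar model of that metric, with hypothetical names:
static __device__ double Sketch_RelativeDifference(double value, double predicted, double floor_abs)
{
	return fabs(value - predicted) / (fabs(value) + floor_abs);
}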
__global__ void kernelSplitIntoSeedRegressors
(
v4 * __restrict__ p_move,
f64_vec3 * __restrict__ p_regr_i,
f64_vec3 * __restrict__ p_regr_e,
f64_vec2 * __restrict__ p_epsxy
) {
long const iMinor = threadIdx.x + blockIdx.x*blockDim.x;
v4 v = p_move[iMinor];
// f64_vec2 eps = p_epsxy[iMinor];
// leave epsilon out for now - would serve as multiplying factor.
f64_vec3 regr_i, regr_e;
regr_e.x = 0.0; regr_e.y = 0.0;
regr_i.x = v.vxy.x; regr_i.y = v.vxy.y;
regr_i.z = v.viz;
regr_e.z = v.vez;
p_regr_i[iMinor] = regr_i;
p_regr_e[iMinor] = regr_e;
}
__global__ void kernelCalcJacobi_Viscosity(
structural * __restrict__ p_info_minor,
f64_vec2 * __restrict__ p_epsilon_xy,
f64 * __restrict__ p_epsilon_iz,
f64 * __restrict__ p_epsilon_ez,
f64_tens3 * __restrict__ p_matrix_i,
f64_tens3 * __restrict__ p_matrix_e, // inverted matrix R^-1 so Jacobi = R^-1 epsilon
f64_vec3 * __restrict__ p_Jacobi_i,
f64_vec3 * __restrict__ p_Jacobi_e)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
structural info = p_info_minor[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
f64_vec2 epsilon_xy = p_epsilon_xy[iMinor];
f64 epsilon_iz = p_epsilon_iz[iMinor];
f64 epsilon_ez = p_epsilon_ez[iMinor];
f64_tens3 matrix = p_matrix_i[iMinor];
p_Jacobi_i[iMinor] = matrix*Make3(epsilon_xy, epsilon_iz);
matrix = p_matrix_e[iMinor];
p_Jacobi_e[iMinor] = matrix*Make3(epsilon_xy, epsilon_ez);
} else {
memset(&(p_Jacobi_i[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_Jacobi_e[iMinor]), 0, sizeof(f64_vec3));
}
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
f64_vec2 epsilon_xy = p_epsilon_xy[iVertex + BEGINNING_OF_CENTRAL];
f64 epsilon_iz = p_epsilon_iz[iVertex + BEGINNING_OF_CENTRAL];
f64 epsilon_ez = p_epsilon_ez[iVertex + BEGINNING_OF_CENTRAL];
f64_tens3 matrix = p_matrix_i[iVertex + BEGINNING_OF_CENTRAL];
p_Jacobi_i[iVertex + BEGINNING_OF_CENTRAL] = matrix*Make3(epsilon_xy, epsilon_iz);
matrix = p_matrix_e[iVertex + BEGINNING_OF_CENTRAL];
p_Jacobi_e[iVertex + BEGINNING_OF_CENTRAL] = matrix*Make3(epsilon_xy, epsilon_ez);
} else {
memset(&(p_Jacobi_i[iVertex + BEGINNING_OF_CENTRAL]), 0, sizeof(f64_vec3));
memset(&(p_Jacobi_e[iVertex + BEGINNING_OF_CENTRAL]), 0, sizeof(f64_vec3));
};
}
}
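// --- Added commentary (illustrative sketch, not called by the solver) --------------------
// kernelCalcJacobi_Viscosity simply applies the pre-inverted 3x3 block R^-1 (stored per
// cell by the kernel below) to the residual (eps_x, eps_y, eps_z), i.e. Jacobi = R^-1 * epsilon.
// The same operation written out over plain arrays, with hypothetical names:
static __device__ void Sketch_Apply3x3(const double Rinv[3][3], const double eps[3], double out[3])
{
	for (int row = 0; row < 3; row++)
		out[row] = Rinv[row][0] * eps[0] + Rinv[row][1] * eps[1] + Rinv[row][2] * eps[2];
}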
__global__ void kernelCalc_Matrices_for_Jacobi_Viscosity(
f64 const hsub,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_elec_minor, // nT / nu ready to look up
f64_vec3 * __restrict__ p_B_minor,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64_tens3 * __restrict__ p_matrix_i,
f64_tens3 * __restrict__ p_matrix_e
)
{
//__shared__ v4 shared_vie[threadsPerTileMinor]; // sort of thing we want as input
// Not used, right? Nothing nonlinear?
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
//__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// 4+2+2+1+1 *1.5 = 15 per thread. That is possibly as slow as having 24 per thread.
// Thus putting some stuff in shared may speed up if there are spills.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64_vec2 opppos, prevpos, nextpos;
	f64 nu, ita_par;  // optimization: in each loop iteration we want omega and nu to go out of scope once we have calc'd these, if possible!!
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX))
{
// memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
// memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
f64_vec2 cc0, cc1;
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if (info.flag == DOMAIN_VERTEX)
//|| (info.flag == OUTERMOST)) // !!!!!!!!!!!!!!!!
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
// d_eps_z_by_d_viz = 1.0; // Note that eps includes v_k+1
if (shared_ita_par_verts[threadIdx.x] > 0.0) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// ** Be especially vigilant to the changes we need to make to go from ion to electron.
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
// Now sort out anticlock vars:
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
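				// (Added note: the expression above is the shoelace-type area of the quadrilateral
				//  with vertices at this point, prevpos, opppos and nextpos; the two coefficients
				//  computed just below are d(grad_x v_j)/d(v_self) and d(grad_y v_j)/d(v_self),
				//  from the same Green's-theorem differencing as the full gradient commented out below.)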
//gradvy.y = -0.5*(
// (our_v.vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
// + (prev_v.vxy.y + our_v.vxy.y)*(prevpos.x - info.pos.x)
// + (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
// + (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
// ) / area_quadrilateral;
//
// so we want to know, eps += U v_self for U 4x4
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
{
f64_vec2 opp_B;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
f64 ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
f64 nu_theirs = p_nu_ion_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
					omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // nota bene: qoverMc (ion)
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (ita_par > 0.0) {
Augment_Jacobean(&J,
hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n*p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_ion),
edge_normal, ita_par, nu, omega_ci,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
};
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
}; // ita_par > 0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_i[iVertex + BEGINNING_OF_CENTRAL]), &result, sizeof(f64_tens3));
// inverted it so that we are ready to put Jacobi = result.eps
} else {
// NOT domain vertex: Do nothing
// NOTE: We did not include OUTERMOST. Justification / effect ??
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
//if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
{
long izNeighMinor[6];
char szPBC[6];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par[threadIdx.x] > 0.0) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// nu = 1.0e10; // DEBUG
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs; // Did I know we were doing this? We use the MINIMUM ita ?
// . We should probably stop that.
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
					omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // nota bene: qoverMc (ion)
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// can't do prev_v == 0.0
// have to see if prev pos inside ins.
if (prevpos.dot(prevpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // prev is in the insulator.
{
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
grad_vjdx_coeff_on_vj_self = 0.5*((info.pos.y - nextpos.y)+ (opppos.y - info.pos.y)) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*((info.pos.x - nextpos.x)+ (opppos.x - info.pos.x)) / area_triangle;
}
else {
							if (nextpos.dot(nextpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // next is in the insulator.
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
grad_vjdx_coeff_on_vj_self = 0.5*(
(prevpos.y - info.pos.y)
+ (info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*(
(prevpos.x - info.pos.x)
+ (info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
};
};
};
};
Augment_Jacobean(&J,
hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_ion),
edge_normal, ita_par, nu, omega_ci,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
}
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
}; // ita_par > 0.0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_i[iMinor]), &result, sizeof(f64_tens3));
} else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
}
else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
if (threadIdx.x < threadsPerTileMajor) {
		long izTri[MAXNEIGH_d];
		char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if ((info.flag == DOMAIN_VERTEX))
//|| (info.flag == OUTERMOST))
{
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par_verts[threadIdx.x] > 0.0) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// All same as ion here:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
					omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // nota bene: qovermc (electron), not qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (ita_par > 0.0)
Augment_Jacobean(&J,
hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_e),
edge_normal, ita_par, nu, omega_ce,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
}; // ita_par > 0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_e[iVertex + BEGINNING_OF_CENTRAL]), &result, sizeof(f64_tens3));
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par[threadIdx.x] > 0.0) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
					omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // nota bene: qovermc (electron), not qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// can't do prev_v == 0.0
// have to see if prev pos inside ins.
if (prevpos.dot(prevpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // prev is in the insulator.
{
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
grad_vjdx_coeff_on_vj_self = 0.5*((info.pos.y - nextpos.y) + (opppos.y - info.pos.y)) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*((info.pos.x - nextpos.x) + (opppos.x - info.pos.x)) / area_triangle;
}
else {
							if (nextpos.dot(nextpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // next is in the insulator.
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
grad_vjdx_coeff_on_vj_self = 0.5*(
(prevpos.y - info.pos.y)
+ (info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*(
(prevpos.x - info.pos.x)
+ (info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
};
};
};
};
Augment_Jacobean(&J,
hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_e),
edge_normal, ita_par, nu, omega_ce,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
};
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
}; // ita_par > 0.0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_e[iMinor]), &result, sizeof(f64_tens3));
}
else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
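// --- Added commentary (illustrative sketch, not called by the solver) --------------------
// Each per-cell 3x3 Jacobian block J assembled above is inverted once (J.Inverse(result))
// and stored, so that every later Jacobi application is a single matrix-vector product.
// For reference, a generic 3x3 inverse by cofactors looks like this sketch (hypothetical
// names; the codebase's f64_tens3::Inverse presumably does the equivalent):
static __device__ bool Sketch_Invert3x3(const double A[3][3], double Ainv[3][3])
{
	double c00 = A[1][1] * A[2][2] - A[1][2] * A[2][1];
	double c01 = A[1][2] * A[2][0] - A[1][0] * A[2][2];
	double c02 = A[1][0] * A[2][1] - A[1][1] * A[2][0];
	double det = A[0][0] * c00 + A[0][1] * c01 + A[0][2] * c02;
	if (fabs(det) == 0.0) return false; // singular: caller must handle
	double ood = 1.0 / det;
	Ainv[0][0] = c00 * ood;
	Ainv[0][1] = (A[0][2] * A[2][1] - A[0][1] * A[2][2]) * ood;
	Ainv[0][2] = (A[0][1] * A[1][2] - A[0][2] * A[1][1]) * ood;
	Ainv[1][0] = c01 * ood;
	Ainv[1][1] = (A[0][0] * A[2][2] - A[0][2] * A[2][0]) * ood;
	Ainv[1][2] = (A[0][2] * A[1][0] - A[0][0] * A[1][2]) * ood;
	Ainv[2][0] = c02 * ood;
	Ainv[2][1] = (A[0][1] * A[2][0] - A[0][0] * A[2][1]) * ood;
	Ainv[2][2] = (A[0][0] * A[1][1] - A[0][1] * A[1][0]) * ood;
	return true;
}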
//
//__global__ void kernelCollectIntegralsMajorCells_FromSrcSyst(
//
// structural * __restrict__ p_info,
// long * __restrict__ p__triguess, // guess at tri where the point lies
// LONG3 * __restrict__ p_tri_corner_index, // guess at tri where the point lies
// LONG3 * __restrict__ p_tri_neigh_index, // guess at tri where the point lies
//
// bool * __restrict__ p__b_moved,
// Shardmodel * __restrict__ p_shards, // data to integrate
// )
//{
// // Think carefully about how this is to be done.
// // We need to send a chuffload of data to GPU in order to run this on GPU.
// // It can be done though.
//
// long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iMinor OF VERTEX
//
// // Target vertex where we do integrals.
// // This is for n and T.
//
// // We want a slim routine which stores the correct tri location when it is found, AND, only integrates mass.
//
// // What about when we come to integrate momentum on minors? Separate routine but still use linear model of nv on shards.
//
// // Defy all.
// long Src_start_point;
// if (p__b_moved[iVertex]) {
//
// // 1. Find src triangle containing dest point.
//
// // Get corner positions --> which half-planes is it outside? Can we end up scrolling tris indefinitely? iirc yes
//
//		. Pick closest rotated image of dest, to move towards. If we keep moving triangles then we will land on the other side of the PB.
//
//		. If we are in more than 1 clipping half-plane, choose the direction where dest is farther from the clip line in the orthogonal direction.
//
// // 2. Having found triangle where it lives, identify which corner to use : it must be true that we are within the shards of at least one of the
// // corners of the containing triangle.
//
// // Set Src_start_point. Be prepared to hit no intersection.
//
// } else {
// // it is co-located with previous position; so work outwards from the one that we know maps.
//
// Src_start_point = iVertex;
// }
//
// // Carry on and seek intersection in each of the neighbours.
//
// // We come unstuck because we cannot store a long list of additional places we must visit to accumulate integral.
//
//
//
//
//
// // We found somewhere with nonzero intersection.
//
// cpDest.GetIntersectionWithTriangle(cpIntersection);
//
// cpIntersection.IntegrateMass(shard corners, shard values of n, &result);
//
// Area_accum += cpIntersection.GetArea(); // We stop when this is 100% of total.
//
// mass_integral += result;
//
//
// if (Area_accum > 0.9999999*Total_dest_area) // we got em!
// {
// // can save off the accumulated sum of mass in the dest
//
// n = accum_mass / Area_accum;
// write to global memory.
// } else {
// // keep looking , but how?
//
//
//
// };
//
// To do on GPU is actually too difficult, can't make a dynamic list.
//}
//
__global__ void kernelAverage_n_T_x_to_tris(
nvals * __restrict__ p_n_minor,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_minor,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_cc,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_periodic_corner_flags,
bool bCalculateOnCircumcenters
)
{
__shared__ nvals shared_n[threadsPerTileMajor];
__shared__ T3 shared_T[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos[threadsPerTileMajor];
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // iMinor OF VERTEX
if (threadIdx.x < threadsPerTileMajor)
{
long getindex = blockIdx.x * threadsPerTileMajor + threadIdx.x;
shared_n[threadIdx.x] = p_n_major[getindex];
shared_T[threadIdx.x] = p_T_minor[BEGINNING_OF_CENTRAL + getindex];
shared_pos[threadIdx.x] = p_info[BEGINNING_OF_CENTRAL + getindex].pos;
};
long const StartMajor = blockIdx.x*threadsPerTileMajor; // vertex iMinor
long const EndMajor = StartMajor + threadsPerTileMajor;
LONG3 const tri_corner_index = p_tri_corner_index[iMinor];
CHAR4 const tri_corner_per_flag = p_tri_periodic_corner_flags[iMinor];
structural info = p_info[iMinor];
__syncthreads();
T3 T(0.0, 0.0, 0.0);
nvals n(0.0, 0.0);
f64_vec2 pos(0.0, 0.0);
f64_vec2 cc(0.0, 0.0);
// New plan for this routine: go through position code for all cases except frills.
// Then compute averaging coefficients for domain and crossing_ins, and use them.
//
n.n = 0.0;
n.n_n = 0.0;
T.Te = 0.0; T.Ti = 0.0; T.Tn = 0.0;
f64_vec2 poscorner0, poscorner1, poscorner2;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
poscorner0 = shared_pos[tri_corner_index.i1 - StartMajor];
} else {
poscorner0 = p_info[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per0 == ROTATE_ME_CLOCKWISE) poscorner0 = Clockwise_d*poscorner0;
if (tri_corner_per_flag.per0 == ROTATE_ME_ANTICLOCKWISE) poscorner0 = Anticlockwise_d*poscorner0;
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
poscorner1 = shared_pos[tri_corner_index.i2 - StartMajor];
} else {
poscorner1 = p_info[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per1 == ROTATE_ME_CLOCKWISE) poscorner1 = Clockwise_d*poscorner1;
if (tri_corner_per_flag.per1 == ROTATE_ME_ANTICLOCKWISE) poscorner1 = Anticlockwise_d*poscorner1;
if ((info.flag != INNER_FRILL) && (info.flag != OUTER_FRILL))
{
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
poscorner2 = shared_pos[tri_corner_index.i3 - StartMajor];
} else {
poscorner2 = p_info[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per2 == ROTATE_ME_CLOCKWISE) poscorner2 = Clockwise_d*poscorner2;
if (tri_corner_per_flag.per2 == ROTATE_ME_ANTICLOCKWISE) poscorner2 = Anticlockwise_d*poscorner2;
f64_vec2 Bb = poscorner1 - poscorner0;
f64_vec2 C = poscorner2 - poscorner0;
f64 D = 2.0*(Bb.x*C.y - Bb.y*C.x);
f64 modB = Bb.x*Bb.x + Bb.y*Bb.y;
f64 modC = C.x*C.x + C.y*C.y;
cc.x = (C.y*modB - Bb.y*modC) / D + poscorner0.x;
cc.y = (Bb.x*modC - C.x*modB) / D + poscorner0.y;
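		// (Added note: standard circumcentre construction relative to corner 0: with
		//  B = corner1 - corner0, C = corner2 - corner0 and D = 2*(Bx*Cy - By*Cx), the
		//  circumcentre offset from corner 0 is ((Cy*|B|^2 - By*|C|^2)/D, (Bx*|C|^2 - Cx*|B|^2)/D).)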
pos = THIRD*(poscorner1 + poscorner0 + poscorner2);
// Always project CC to insulator:
if ((info.flag == CROSSING_INS))
{
f64_vec2 cc2 = cc;
cc2.project_to_radius(cc, DEVICE_RADIUS_INSULATOR_OUTER);
};
// Hold up:
// If cc is outside the triangle, move towards pos until it is inside.
// Take cc-poscorner0 and look at the dimension that is perpendicular to poscorner1-poscorner2
// Is it greater than we get for poscorner1-poscorner0
// If so we've got to move towards pos; how do we know how far to move?
// Presumably component length changes linearly with change in vector so check component length for pos.
// Then test if we are outside the other edge normals.
f64_vec2 minus = cc - poscorner0;
f64_vec2 edgenormal;
edgenormal.x = poscorner2.y - poscorner1.y;
edgenormal.y = poscorner1.x - poscorner2.x;
// Are 0,1,2 anticlockwise? yes
// so if x = y2-y1 then it points out
f64 edgemod = edgenormal.modulus();
edgenormal /= edgemod;
f64 dist = minus.dot(edgenormal);
f64 dist2 = (poscorner2 - poscorner0).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner0).dot(edgenormal);
// dist2 = lambda*dist3 + (1-lambda) dist
// lambda = (dist2-dist) / (dist3-dist)
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
minus = cc - poscorner2;
edgenormal.x = poscorner1.y - poscorner0.y;
edgenormal.y = poscorner0.x - poscorner1.x;
edgemod = edgenormal.modulus();
edgenormal /= edgemod;
dist = minus.dot(edgenormal);
dist2 = (poscorner0 - poscorner2).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner2).dot(edgenormal);
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
minus = cc - poscorner1;
edgenormal.x = poscorner0.y - poscorner2.y;
edgenormal.y = poscorner2.x - poscorner0.x;
edgemod = edgenormal.modulus();
edgenormal /= edgemod;
dist = minus.dot(edgenormal);
dist2 = (poscorner0 - poscorner1).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner1).dot(edgenormal);
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
} else {
// FRILL
pos = 0.5*(poscorner1 + poscorner0);
f64_vec2 pos2 = pos;
if (info.flag == INNER_FRILL) {
pos2.project_to_radius(pos, FRILL_CENTROID_INNER_RADIUS_d);
} else {
pos2.project_to_radius(pos, FRILL_CENTROID_OUTER_RADIUS_d);
};
cc = pos;
}
// Now set up averaging coefficients and set n,T.
// Outer frills it is thus set to n=0,T=0.
// Well, circumcenter is equidistant so 1/3 is still reasonable average.
// I think I prefer linear interpolation, making this a point estimate of n. The masses are saved
// in the vertcells.
if (info.flag == DOMAIN_TRIANGLE) {
f64 lambda1, lambda2, lambda3;
if (bCalculateOnCircumcenters) {
f64_vec2 x0 = poscorner0, x1 = poscorner1, x2 = poscorner2;
f64_vec2 a1, a2;
f64 b1, b2;
// a1.x = (x1.y - x2.y) / ((x0.x - x2.x)*(x1.y - x2.y) - (x1.x - x2.x)*(x0.y - x2.y));
// a1.y = (x2.x - x1.x) / ((x0.x - x2.x)*(x1.y - x2.y) - (x1.x - x2.x)*(x0.y - x2.y));
// b1 = -a1.x*x2.x - a1.y*x2.y;
// a2.x = (x0.y - x2.y) / ((x1.x - x2.x)*(x0.y - x2.y) - (x1.y - x2.y)*(x0.x - x2.x));
// a2.y = (x2.x - x0.x) / ((x1.x - x2.x)*(x0.y - x2.y) - (x1.y - x2.y)*(x0.x - x2.x));
// b2 = -a2.x*x2.x - a2.y*x2.y;
// lambda1 = a1.x*cc.x + a1.y*cc.y + b1;
// lambda2 = a2.x*cc.x + a2.y*cc.y + b2;
// lambda3 = 1.0 - lambda1 - lambda2;
// We are getting lambda3 < 0 when the point is well inside the triangle.
// What gives?
// Try this instead:
lambda1 = ((x1.y - x2.y)*(cc.x - x2.x) + (x2.x - x1.x)*(cc.y - x2.y)) /
((x1.y - x2.y)*(x0.x - x2.x) + (x2.x - x1.x)*(x0.y - x2.y));
lambda2 = ((x2.y-x0.y)*(cc.x-x2.x) + (x0.x-x2.x)*(cc.y-x2.y))/
((x1.y - x2.y)*(x0.x - x2.x) + (x2.x - x1.x)*(x0.y - x2.y));
lambda3 = 1.0 - lambda1 - lambda2;
} else {
lambda1 = THIRD;
lambda2 = THIRD;
lambda3 = THIRD;
};
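		// (Added note: lambda1..lambda3 are the barycentric coordinates of cc in the triangle
		//  (x0, x1, x2): they sum to 1 and satisfy lambda1*x0 + lambda2*x1 + lambda3*x2 = cc,
		//  so the weighted sums below give a point estimate of n and T at cc, or the plain
		//  corner average when the THIRD weights are used.)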
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
n += lambda1*shared_n[tri_corner_index.i1 - StartMajor];
T += lambda1*shared_T[tri_corner_index.i1 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn, tri_corner_index.i1);
} else {
n += lambda1*p_n_major[tri_corner_index.i1];
T += lambda1*p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
n += lambda2*shared_n[tri_corner_index.i2 - StartMajor];
T += lambda2*shared_T[tri_corner_index.i2 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn, tri_corner_index.i2);
}
else {
n += lambda2*p_n_major[tri_corner_index.i2];
T += lambda2*p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
n += lambda3*shared_n[tri_corner_index.i3 - StartMajor];
T += lambda3*shared_T[tri_corner_index.i3 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i3 - StartMajor].n_n,
				shared_T[tri_corner_index.i3 - StartMajor].Tn, tri_corner_index.i3);
}
else {
n += lambda3*p_n_major[tri_corner_index.i3];
T += lambda3*p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
if (TESTTRI)
printf("%d: lambda %1.10E %1.10E %1.10E\ncorner n %1.10E %1.10E %1.10E\n"
"cc %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E \n"
"indexcorner %d %d %d result nn= %1.10E Tn %1.12E \n\n",
iMinor, lambda1, lambda2, lambda3,
p_n_major[tri_corner_index.i1].n,
p_n_major[tri_corner_index.i2].n,
p_n_major[tri_corner_index.i3].n,
cc.x,cc.y, poscorner0.x, poscorner0.y, poscorner1.x, poscorner1.y, poscorner2.x, poscorner2.y,
tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
n.n_n, T.Tn
);
}
else {
// What else?
if (info.flag == CROSSING_INS)
{
int iAbove = 0;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
if (poscorner0.dot(poscorner0) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i1 - StartMajor];
T += shared_T[tri_corner_index.i1 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn);
};
}
else {
if (poscorner0.dot(poscorner0) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i1];
T += p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
}
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
if (poscorner1.dot(poscorner1) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i2 - StartMajor];
T += shared_T[tri_corner_index.i2 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn);
};
}
else {
if (poscorner1.dot(poscorner1) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i2];
T += p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
if (poscorner2.dot(poscorner2) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i3 - StartMajor];
T += shared_T[tri_corner_index.i3 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
shared_n[tri_corner_index.i3 - StartMajor].n_n,
shared_T[tri_corner_index.i3 - StartMajor].Tn);
};
}
else {
if (poscorner2.dot(poscorner2) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i3];
T += p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
};
#ifdef PROJECT_TO_INS_ALWAYS
f64_vec2 pos2 = pos;
pos2.project_to_radius(pos, DEVICE_RADIUS_INSULATOR_OUTER);
#else
if (pos.dot(pos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
f64_vec2 pos2 = pos;
pos2.project_to_radius(pos, DEVICE_RADIUS_INSULATOR_OUTER);
};
// project only if below insulator.!
#endif
f64 divide = 1.0 / (f64)iAbove;
n.n *= divide;
n.n_n *= divide;
T.Tn *= divide;
T.Ti *= divide;
T.Te *= divide;
if (TESTTRI)
printf("%d INS tri: iAbove %d Tn divided: %1.14E\n", iMinor, iAbove, T.Tn);
} else {
if (info.flag == CROSSING_CATH)
{
int iAbove = 0;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
if (poscorner0.x*poscorner0.x+(poscorner0.y-CATHODE_ROD_R_POSITION)*(poscorner0.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i1 - StartMajor];
T += shared_T[tri_corner_index.i1 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn);
};
} else {
if (poscorner0.x*poscorner0.x + (poscorner0.y - CATHODE_ROD_R_POSITION)*(poscorner0.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i1];
T += p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
}
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
if (poscorner1.x*poscorner1.x + (poscorner1.y - CATHODE_ROD_R_POSITION)*(poscorner1.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i2 - StartMajor];
T += shared_T[tri_corner_index.i2 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn);
};
} else {
if (poscorner1.x*poscorner1.x + (poscorner1.y - CATHODE_ROD_R_POSITION)*(poscorner1.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i2];
T += p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
if (poscorner2.x*poscorner2.x + (poscorner2.y - CATHODE_ROD_R_POSITION)*(poscorner2.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i3 - StartMajor];
T += shared_T[tri_corner_index.i3 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
shared_n[tri_corner_index.i3 - StartMajor].n_n,
shared_T[tri_corner_index.i3 - StartMajor].Tn);
};
}
else {
if (poscorner2.x*poscorner2.x + (poscorner2.y - CATHODE_ROD_R_POSITION)*(poscorner2.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i3];
T += p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
};
f64_vec2 pos2 = pos;
// pos2.y -= CATHODE_ROD_R_POSITION;
// Now we need to project it on to the circle about (0.0,5.0)
// pos2.project_to_radius(pos, CATHODE_ROD_RADIUS);
// pos.y += CATHODE_ROD_R_POSITION;
// Do not project. !
f64 divide = 1.0 / (f64)iAbove;
n.n *= divide;
n.n_n *= divide;
T.Tn *= divide;
T.Ti *= divide;
T.Te *= divide;
if (TESTTRI)
printf("%d CATH tri: iAbove %d Tn divided: %1.14E\n", iMinor, iAbove, T.Tn);
} else {
// Cool neutrals in frill:
if (info.flag == OUTER_FRILL) {
n.n = INITIAL_BACKGROUND_ION_DENSITY;
n.n_n = INITIAL_TOTAL_DENSITY - INITIAL_BACKGROUND_ION_DENSITY;
T.Te = 4.0e-14; T.Ti = 4.0e-14; T.Tn = 4.0e-14;
} else {
// inner frill? out of domain?
n.n = 0.0;
n.n_n = 0.0;
T.Te = 0.0; T.Ti = 0.0; T.Tn = 0.0;
}
}
};
// Outer frills it is thus set to n=0,T=0.
};
if (TESTTRI) printf("\n%d flag %d Tn %1.12E info.pos.x %1.9E cc.x %1.9E \n", iMinor, info.flag, T.Tn, pos.x, cc.x);
p_n_minor[iMinor] = n;
p_T_minor[iMinor] = T;
info.pos = pos;
p_info[iMinor] = info;
p_cc[iMinor] = cc;
}
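// --- Added commentary (illustrative sketch, not called by the solver) --------------------
// For CROSSING_INS / CROSSING_CATH triangles the kernel above averages n and T over only
// the corners lying outside the insulator (or cathode rod), dividing by the count iAbove;
// it relies on at least one corner qualifying. A scalar model of that pattern, with
// hypothetical names:
static __device__ double Sketch_AverageOfQualifyingCorners(const double value[3], const bool bQualifies[3])
{
	double sum = 0.0;
	int count = 0;
	for (int i = 0; i < 3; i++)
		if (bQualifies[i]) { sum += value[i]; count++; }
	return (count > 0) ? sum / (double)count : 0.0; // guard the empty case, unlike the kernel
}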
__global__ void kernelCreateShardModelOfDensities_And_SetMajorArea(
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_major,
nvals * __restrict__ p_n_minor,
long * __restrict__ p_izTri_vert,
char * __restrict__ p_szPBCtri_vert,
f64_vec2 * __restrict__ p_cc,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_n_shards,
// long * __restrict__ Tri_n_lists,
// long * __restrict__ Tri_n_n_lists ,
f64 * __restrict__ p_AreaMajor,
bool bUseCircumcenter
)// sets n_shards_n, n_shards, Tri_n_n_lists, Tri_n_lists
{
// called for major tile
// Interpolation to Tri_n_lists, Tri_n_n_lists is not yet implemented. But this would be output.
// Inputs:
// n, pTri->cent, izTri, pTri->periodic, pVertex->pos
// Outputs:
// pVertex->AreaCell
// n_shards[iVertex]
// Tri_n_n_lists[izTri[i]][o1 * 2] <--- 0 if not set by domain vertex
// CALL AVERAGE OF n TO TRIANGLES - WANT QUADRATIC AVERAGE - BEFORE WE BEGIN
// MUST ALSO POPULATE pVertex->AreaCell with major cell area
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ nvals shared_n[threadsPerTileMinor];
// Here 4 doubles/minor. In 16*1024, 4 double*8 bytes*512 minor. 256 major.
// Choosing to store n_n while doing n which is not necessary.
ShardModel n_; // to be populated
int iNeigh, tri_len;
f64 N_n, N, interpolated_n, interpolated_n_n;
long i, inext, o1, o2;
//memset(Tri_n_n_lists, 0, sizeof(f64)*NUMTRIANGLES * 6);
//memset(Tri_n_lists, 0, sizeof(f64)*NUMTRIANGLES * 6);
// We can afford to stick 6-8 doubles in shared. 8 vars*8 bytes*256 threads = 16*1024.
if (bUseCircumcenter == false)
{
structural info2[2];
memcpy(info2, p_info_minor + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info2[0].pos;
shared_pos[2 * threadIdx.x + 1] = info2[1].pos;
}
else {
memcpy(&(shared_pos[2 * threadIdx.x]), p_cc + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(f64_vec2) * 2);
}
memcpy(&(shared_n[2 * threadIdx.x]), p_n_minor + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(nvals) * 2);
__syncthreads();
long const StartMinor = blockIdx.x*threadsPerTileMinor; // vertex index
long const EndMinor = StartMinor + threadsPerTileMinor;
// To fit in Tri_n_n_lists stuff we should first let coeff[] go out of scope.
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[BEGINNING_OF_CENTRAL + iVertex];
if (info.flag == DOMAIN_VERTEX) {
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
f64 coeff[MAXNEIGH]; // total 21*12 = 252 bytes. 256 max for 192 threads.
f64 ndesire0, ndesire1;
f64_vec2 pos0, pos1;
memcpy(izTri, p_izTri_vert + MAXNEIGH_d*iVertex, sizeof(long)*MAXNEIGH_d);
memcpy(szPBC, p_szPBCtri_vert + MAXNEIGH_d*iVertex, sizeof(char)*MAXNEIGH_d);
f64 n_avg = p_n_major[iVertex].n;
		// WHY WAS IT minor NOT major?
if ((izTri[0] >= StartMinor) && (izTri[0] < EndMinor)) {
pos0 = shared_pos[izTri[0] - StartMinor];
ndesire0 = shared_n[izTri[0] - StartMinor].n;
}
else {
if (bUseCircumcenter) {
pos0 = p_cc[izTri[0]];
}
else {
pos0 = p_info_minor[izTri[0]].pos;
} // there exists a more elegant way than this!!!
ndesire0 = p_n_minor[izTri[0]].n;
}
if (szPBC[0] == ROTATE_ME_CLOCKWISE) pos0 = Clockwise_d*pos0;
if (szPBC[0] == ROTATE_ME_ANTICLOCKWISE) pos0 = Anticlockwise_d*pos0;
f64 tri_area;
f64 N0 = 0.0; f64 coeffcent = 0.0;
memset(coeff, 0, sizeof(f64)*MAXNEIGH_d);
short i;
f64 AreaMajor = 0.0;
f64 high_n = ndesire0;
f64 low_n = ndesire0;
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
{
// Temporary setting:
n_.n[i] = ndesire0;
inext = i + 1; if (inext == info.neigh_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor)) {
pos1 = shared_pos[izTri[inext] - StartMinor];
ndesire1 = shared_n[izTri[inext] - StartMinor].n;
}
else {
if (bUseCircumcenter) {
pos1 = p_cc[izTri[inext]];
}
else {
pos1 = p_info_minor[izTri[inext]].pos;
}
ndesire1 = p_n_minor[izTri[inext]].n;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) pos1 = Clockwise_d*pos1;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) pos1 = Anticlockwise_d*pos1;
high_n = max(ndesire1, high_n);
low_n = min(ndesire1, low_n);
tri_area = fabs(0.5*
((pos0.x + pos1.x) * (pos1.y - pos0.y)
+ (info.pos.x + pos1.x) * (info.pos.y - pos1.y)
+ (info.pos.x + pos0.x) * (pos0.y - info.pos.y)));
if (TEST1) printf("%d : ndesire0 %1.10E ndesire1 %1.10E high_n low_n %1.8E %1.8E tri_area %1.9E\n", VERTCHOSEN,
ndesire0, ndesire1, high_n, low_n, tri_area);
N0 += tri_area*THIRD*(ndesire0 + ndesire1);
coeff[i] += tri_area*THIRD;
coeff[inext] += tri_area*THIRD;
coeffcent += tri_area*THIRD;
AreaMajor += tri_area;
pos0 = pos1;
ndesire0 = ndesire1;
};
// . If n_avg > high_n or n_avg < low_n: set every shard value and n_cent to n_avg.
// . If low_n < n_C_need < high_n: keep the corner values and set n_cent = n_C_need.
// Otherwise we now have the coeff array populated and will go round
// repeatedly ("the laborious case"), reloading n several times.
// This is not the typical case.
p_AreaMajor[iVertex] = AreaMajor;
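// Bookkeeping from the loop above: AreaMajor = sum of tri_area and coeffcent = THIRD*AreaMajor,
// so the mass represented by the shard model is N = sum_i coeff[i]*n[i] + coeffcent*n_cent.
// The branches below choose n[i], n_cent so that N = n_avg*AreaMajor.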
if ((n_avg > high_n) || (n_avg < low_n)) {
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
n_.n[i] = n_avg;
n_.n_cent = n_avg;
if (TEST1) printf("VERTCHOSEN (n_avg > high_n) || (n_avg < low_n) \n");
// if (iVertex == CHOSEN) printf("CHOSEN : Switch1 n_avg %1.12E \n",n_avg);
}
else {
f64 n_C_need = (n_avg*AreaMajor - N0) / coeffcent;
if ((n_C_need > low_n) && (n_C_need < high_n)) {
n_.n_cent = n_C_need;
if (TEST1) printf("VERTCHOSEN ((n_C_need > low_n) && (n_C_need < high_n)) \n");
// if (iVertex == CHOSEN) printf("CHOSEN : Switch2 n_C_need %1.12E low_n %1.12E high_n %1.12E\n", n_C_need,low_n,high_n);
}
else {
// The laborious case.
// if (iVertex == CHOSEN) printf("Laborious case...\n");
if (TEST1) printf("VERTCHOSEN The laborious case. n_avg %1.10E n_C_need %1.10E low_n %1.10E high_n %1.10E\n",
n_avg, n_C_need, low_n, high_n);
bool fixed[MAXNEIGH];
memset(fixed, 0, sizeof(bool) * MAXNEIGH);
// cannot fit even this alongside the rest we have in L1.
// Can we make szPBC go out of scope by here?
f64 n_C, n_acceptable;
if (n_C_need < low_n) {
// the mass is low. So for those less than some n_acceptable,
// let them attain n_desire, and fix n_C = low_n.
// Then we'll see how high we can go with n_acceptable.
// if (iVertex == CHOSEN) printf("(n_C_need < low_n)\n");
n_C = low_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
// area-THIRD*area = sum of other coeffs, and of course
// coeffcent = THIRD*area
// n_acceptable > N/area since N = area*n_avg > area*low_n.
// We accept corners whose desired value is below this 'max average', and that
// raises the threshold for the rest; go round again until no new corners are accepted.
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*low_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n;
}
else {
ndesire = p_n_minor[izTri[i]].n;
};
// if (iVertex == CHOSEN) printf("CHOSEN : ndesire %1.14E n_acceptable %1.14E\n", ndesire,n_acceptable);
if (ndesire < n_acceptable) { // yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
// It can happen that eventually ALL are found
// to be < n_acceptable due to FP error.
// On next pass found will be false.
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
// The value to which we have to set the remaining
// n values.
};
// if (iVertex == CHOSEN) printf("---\n");
} while (found != 0);
}
else {
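// Mirror case: the target mass n_avg*AreaMajor is high relative to the desired corner values.
// Fix n_C = high_n; corners whose desired value exceeds n_acceptable keep it, and the
// remaining corners are raised to n_acceptable afterwards.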
n_C = high_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*high_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n;
}
else {
ndesire = p_n_minor[izTri[i]].n;
};
// if (iVertex == CHOSEN) printf("CHOSEN : ndesire %1.14E n_acceptable %1.14E\n", ndesire, n_acceptable);
if (ndesire > n_acceptable) {
// yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
};
// if (iVertex == CHOSEN) printf("@@@ \n");
} while (found != 0);
};
// Now set the remaining (un-fixed) values to n_acceptable: below their ndesire[i]
// in the n_C = low_n branch, above it in the n_C = high_n branch.
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) n_.n[i] = n_acceptable;
if (TEST1) printf("n[%d]: %1.10E\n", i, n_.n[i]);
};
n_.n_cent = n_C;
if (TEST1) {
for (i = 0; i < info.neigh_len; i++)
{
printf("%1.10E \t\t", n_.n[i]);
}
printf("\nn_cent %1.14E \n\n", n_.n_cent);
};
};
};
memcpy(&(p_n_shards[iVertex]), &n_, sizeof(ShardModel));
if (TEST1) printf("iVertex %d n_cent %1.10E nmajor(=n_avg) %1.10E \n*********\n",
iVertex, n_.n_cent, n_avg);
// Now start again: neutrals
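// Second pass: the same construction for the neutral density n_n. The geometry is unchanged,
// so coeff[], coeffcent and AreaMajor from the first pass are reused.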
n_avg = p_n_major[iVertex].n_n;
if ((izTri[0] >= StartMinor) && (izTri[0] < EndMinor)) {
pos0 = shared_pos[izTri[0] - StartMinor];
ndesire0 = shared_n[izTri[0] - StartMinor].n_n;
}
else {
if (bUseCircumcenter) {
pos0 = p_cc[izTri[0]];
}
else {
pos0 = p_info_minor[izTri[0]].pos;
};
ndesire0 = p_n_minor[izTri[0]].n_n;
}
if (szPBC[0] == ROTATE_ME_CLOCKWISE) pos0 = Clockwise_d*pos0;
if (szPBC[0] == ROTATE_ME_ANTICLOCKWISE) pos0 = Anticlockwise_d*pos0;
N0 = 0.0;
//coeffcent = 0.0;
//memset(coeff, 0, sizeof(f64)*MAXNEIGH_d); // keep em
high_n = ndesire0;
low_n = ndesire0;
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
{
// Temporary setting:
n_.n[i] = ndesire0;
inext = i + 1; if (inext == info.neigh_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor)) {
pos1 = shared_pos[izTri[inext] - StartMinor];
ndesire1 = shared_n[izTri[inext] - StartMinor].n_n;
}
else {
if (bUseCircumcenter) {
pos1 = p_cc[izTri[inext]];
}
else {
pos1 = p_info_minor[izTri[inext]].pos;
}
ndesire1 = p_n_minor[izTri[inext]].n_n;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) pos1 = Clockwise_d*pos1;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) pos1 = Anticlockwise_d*pos1;
high_n = max(ndesire1, high_n);
low_n = min(ndesire1, low_n);
tri_area = fabs(0.5*
((pos0.x + pos1.x) * (pos1.y - pos0.y)
+ (info.pos.x + pos1.x) * (info.pos.y - pos1.y)
+ (info.pos.x + pos0.x) * (pos0.y - info.pos.y)));
N0 += tri_area*THIRD*(ndesire0 + ndesire1); // Could consider moving it into loop above.
pos0 = pos1;
ndesire0 = ndesire1;
};
// . If n_avg > high_n or n_avg < low_n: set every shard value and n_cent to n_avg.
// . If low_n < n_C_need < high_n: keep the corner values and set n_cent = n_C_need.
// Otherwise the coeff array (kept from the first pass) is reused and we go round
// repeatedly, reloading n_n several times. This is not the typical case.
if ((n_avg > high_n) || (n_avg < low_n)) {
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
n_.n[i] = n_avg;
n_.n_cent = n_avg;
}
else {
f64 n_C_need = (n_avg*AreaMajor - N0) / coeffcent;
if ((n_C_need > low_n) && (n_C_need < high_n)) {
n_.n_cent = n_C_need; // accept desired values
}
else {
// The laborious case.
bool fixed[MAXNEIGH];
memset(fixed, 0, sizeof(bool) * MAXNEIGH);
// cannot fit even this alongside the rest we have in L1.
// Can we make szPBC go out of scope by here?
f64 n_C, n_acceptable;
if (n_C_need < low_n) {
// the mass is low. So for those less than some n_acceptable,
// let them attain n_desire, and fix n_C = low_n.
// Then we'll see how high we can go with n_acceptable.
n_C = low_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
// area-THIRD*area = sum of other coeffs, and of course
// coeffcent = THIRD*area
// n_acceptable > N/area since N = area*n_avg > area*low_n.
// We accept corners whose desired value is below this 'max average', and that
// raises the threshold for the rest; go round again until no new corners are accepted.
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*low_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n_n;
}
else {
ndesire = p_n_minor[izTri[i]].n_n;
};
if (ndesire < n_acceptable) { // yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
// It can happen that eventually ALL are found
// to be < n_acceptable due to FP error.
// On next pass found will be false.
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
// The value to which we have to set the remaining
// n values.
};
} while (found != 0);
}
else {
n_C = high_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*high_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n_n;
}
else {
ndesire = p_n_minor[izTri[i]].n_n;
};
if (ndesire > n_acceptable) {
// yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
};
} while (found != 0);
};
// Now set the remaining (un-fixed) values to n_acceptable: below their ndesire[i]
// in the n_C = low_n branch, above it in the n_C = high_n branch.
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) n_.n[i] = n_acceptable;
};
n_.n_cent = n_C;
};
};
memcpy(&(p_n_n_shards[iVertex]), &n_, sizeof(ShardModel));
// Now done both species.
}
else { // NOT DOMAIN_VERTEX
if (info.flag == OUTERMOST) {
n_.n_cent = p_n_major[iVertex].n;
for (i = 0; i < MAXNEIGH; i++)
n_.n[i] = n_.n_cent;
memcpy(&(p_n_shards[iVertex]), &n_, sizeof(ShardModel));
if (iVertex == VERTCHOSEN) printf("%d n_major.n %1.10E n_.n[4] %1.8E n_.n_cent %1.8E\n\n\n",
iVertex, p_n_major[iVertex].n, n_.n[4], n_.n_cent);
n_.n_cent = p_n_major[iVertex].n_n;
for (i = 0; i < MAXNEIGH; i++)
n_.n[i] = n_.n_cent;
memcpy(&(p_n_n_shards[iVertex]), &n_, sizeof(ShardModel));
f64 AreaTotal = PPN_CIRCLE*M_PI*(DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS -
INNER_A_BOUNDARY*INNER_A_BOUNDARY);
p_AreaMajor[iVertex] = AreaTotal / (f64)(numTilesMajor*threadsPerTileMajor); // ?
// Setting area of outermost to average vertcell area
// ...
// Watch out for this when we make OUTERMOST FEWER
}
else {
memset(&(p_n_shards[iVertex]), 0, sizeof(ShardModel));
memset(&(p_n_n_shards[iVertex]), 0, sizeof(ShardModel));
p_AreaMajor[iVertex] = 0.0; // NOTA BENE
};
};
// Next: tri_n_lists.
// Think I am not using this passing mechanism for n_shards information.
/*
for (i = 0; i < cp.numCoords; i++)
{
// for 2 triangles each corner:
// first check which number corner this vertex is
// make sure we enter them in order that goes anticlockwise for the
// Then we need to make izMinorNeigh match this somehow
// Let's say izMinorNeigh goes [across corner 0, across edge 2, corner 1, edge 0, corner 2, edge 1]
// We want 0,1 to be the values corresp corner 0.
// shard value 0 is in tri 0. We look at each pair of shard values in turn to interpolate.
inext = i + 1; if (inext == cp.numCoords) inext = 0;
interpolated_n = THIRD * (n_shards[iVertex].n[i] + n_shards[iVertex].n[inext] + n_shards[iVertex].n_cent);
interpolated_n_n = THIRD * (n_shards_n[iVertex].n[i] + n_shards_n[iVertex].n[inext] + n_shards_n[iVertex].n_cent);
// contribute to tris i and inext:
o1 = (T + izTri[i])->GetCornerIndex(X + iVertex);
o2 = (T + izTri[inext])->GetCornerIndex(X + iVertex);
// Now careful which one's which:
// inext sees this point as more anticlockwise.
Tri_n_lists[izTri[inext]][o2 * 2 + 1] = interpolated_n;
Tri_n_lists[izTri[i]][o1 * 2] = interpolated_n;
Tri_n_n_lists[izTri[inext]][o2 * 2 + 1] = interpolated_n_n;
Tri_n_n_lists[izTri[i]][o1 * 2] = interpolated_n_n;
};*/
}
__global__ void kernelCreateShardModelOfDensities_And_SetMajorAreaDEBUG(
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_major,
nvals * __restrict__ p_n_minor,
long * __restrict__ p_izTri_vert,
char * __restrict__ p_szPBCtri_vert,
f64_vec2 * __restrict__ p_cc,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_n_shards,
// long * __restrict__ Tri_n_lists,
// long * __restrict__ Tri_n_n_lists ,
f64 * __restrict__ p_AreaMajor,
bool bUseCircumcenter
)// sets n_shards_n, n_shards, Tri_n_n_lists, Tri_n_lists
{
// called for major tile
// Interpolation to Tri_n_lists, Tri_n_n_lists is not yet implemented. But this would be output.
// Inputs:
// n, pTri->cent, izTri, pTri->periodic, pVertex->pos
// Outputs:
// pVertex->AreaCell
// n_shards[iVertex]
// Tri_n_n_lists[izTri[i]][o1 * 2] <--- 0 if not set by domain vertex
// CALL AVERAGE OF n TO TRIANGLES - WANT QUADRATIC AVERAGE - BEFORE WE BEGIN
// MUST ALSO POPULATE pVertex->AreaCell with major cell area
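// DEBUG variant: the DOMAIN_VERTEX branch is gutted and the non-domain stores are selectively
// commented out, to isolate which write is responsible for the failure noted below.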
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ nvals shared_n[threadsPerTileMinor];
// Here 4 doubles/minor. In 16*1024, 4 double*8 bytes*512 minor. 256 major.
// Choosing to store n_n while doing n which is not necessary.
ShardModel n_; // to be populated
int iNeigh, tri_len;
f64 N_n, N, interpolated_n, interpolated_n_n;
long i, inext, o1, o2;
long const StartMinor = blockIdx.x*threadsPerTileMinor; // first minor index of this tile
long const EndMinor = StartMinor + threadsPerTileMinor;
// To fit in Tri_n_n_lists stuff we should first let coeff[] go out of scope.
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[BEGINNING_OF_CENTRAL + iVertex];
if (info.flag == DOMAIN_VERTEX) {
} else { // NOT DOMAIN_VERTEX
if (info.flag == OUTERMOST) {
// f64 AreaTotal = PPN_CIRCLE*M_PI*(DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS -
// INNER_A_BOUNDARY*INNER_A_BOUNDARY);
// p_AreaMajor[iVertex] = AreaTotal;
// commented 2
// Setting area of outermost to average vertcell area
// ...
// Watch out for this when we make OUTERMOST FEWER
} else {
memset(&(p_n_shards[iVertex]), 0, sizeof(ShardModel));
// This alone kills it.
// Checking that it still dies when we scrap the first part.
// If this really kills then try doing hipMemset and see if that dies.
// Good - still dies.
// 3 : Try get rid of this one:
// memset(&(p_n_n_shards[iVertex]), 0, sizeof(ShardModel));
// is this what kills it?
// YES! Surprisingly.
// p_AreaMajor[iVertex] = 0.0; // NOTE BENE
// ?
};
// COMMENTED 1
};
}
__global__ void kernelInferMinorDensitiesFromShardModel(
structural * __restrict__ p_info,
nvals * __restrict__ p_n_minor,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_shards_n,
LONG3 * __restrict__ p_tri_corner_index,
LONG3 * __restrict__ p_who_am_I_to_corner,
nvals * __restrict__ p_one_over_n
) {
// Assume that we do the simplest thing possible.
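// Overview: vertex-centred minors take the shard-model centre value n_cent; domain triangles take
// the mean of their three corner shard values, addressed via who_am_I_to_corner; triangles crossing
// the insulator/cathode average only the corners that are domain vertices; p_one_over_n receives
// the corresponding averaged reciprocals.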
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // minor index (vertex-centred minors start at BEGINNING_OF_CENTRAL)
structural info = p_info[iMinor];
nvals result;
if (iMinor >= BEGINNING_OF_CENTRAL)
{
// if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) printf("\niMinor %d pos %1.10E %1.10E flag %d \n",
// iMinor, info.pos.x, info.pos.y, info.flag);
if (info.flag == DOMAIN_VERTEX) {
result.n = p_n_shards[iMinor - BEGINNING_OF_CENTRAL].n_cent;
result.n_n = p_n_shards_n[iMinor - BEGINNING_OF_CENTRAL].n_cent;
p_n_minor[iMinor] = result;
result.n = 1.0 / result.n;
result.n_n = 1.0 / result.n_n;
p_one_over_n[iMinor] = result;
// We are not being consistent.
// We may wish to use major n here --> minor central n
// We have not done the shard model for target n, we just average and then tween this back.
// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
} else {
// Outermost vertex?
result.n = 0.0;
result.n_n = 0.0;
if (info.flag == OUTERMOST) {
result.n_n = 1.0e18;
result.n = UNIFORM_n_d;
};
p_n_minor[iMinor] = result;
result.n_n = 1.0 / result.n_n;
result.n = 1.0 / result.n;
p_one_over_n[iMinor] = result;
}
} else {
if (info.flag == DOMAIN_TRIANGLE) {
LONG3 tri_corner_index = p_tri_corner_index[iMinor];
LONG3 who_am_I_to_corner = p_who_am_I_to_corner[iMinor];
result.n = THIRD*
(p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
result.n_n = THIRD*
(p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
p_n_minor[iMinor] = result;
if (TESTTRI) printf("%d: %d %d %d shards n %1.10E %1.10E %1.10E result %1.10E\n",
CHOSEN, tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1],
p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2],
p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3], result.n);
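// Store 1/n as the mean of the corner reciprocals rather than the reciprocal of the mean.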
result.n = THIRD*(
1.0/ p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ 1.0/ p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ 1.0/p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
result.n_n = THIRD*
(1.0/p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ 1.0/p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ 1.0/p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
p_one_over_n[iMinor] = result;
} else {
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
LONG3 tri_corner_index = p_tri_corner_index[iMinor];
LONG3 who_am_I_to_corner = p_who_am_I_to_corner[iMinor];
result.n = 0.0;
result.n_n = 0.0;
structural info1, info2, info3;
info1 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i1];
info2 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i2];
info3 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i3];
int numabove = 0;
if (info1.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1];
result.n_n += p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1];
};
if (info2.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2];
result.n_n += p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2];
};
if (info3.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3];
result.n_n += p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3];
};
if (TESTTRI) printf("%d: %d %d %d C-INS shards n %1.10E %1.10E %1.10E result %1.10E numabove %d\n",
CHOSEN, tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1],
p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2],
p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3], result.n,
numabove);
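// numabove counts the corners that are domain vertices; a CROSSING_INS / CROSSING_CATH triangle
// presumably always has at least one, otherwise the division below would be 0/0.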
result.n /= (f64)numabove;
result.n_n /= (f64)numabove;
p_n_minor[iMinor] = result;
result.n = 1.0 / result.n;
result.n_n = 1.0 / result.n_n;
p_one_over_n[iMinor] = result;
} else {
memset(&(p_n_minor[iMinor]), 0, sizeof(nvals));
}
}
}
}
/*
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_major,
T3 * __restrict__ p_T_k,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only have 31 doubles in registry
// regardless # of threads and space? Or can be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
f64 tempf64[2];
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p_T_major[iVertex].Tn;
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid? IMPORTANT
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwords we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// The idea of not sending blocks full of non-domain vertices is another idea. Fiddly with indices.
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
// EXPERIMENT WHETHER IT IS FASTER WITH THESE OUTSIDE OR INSIDE THE BRANCH.
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Tn;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Tn;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Tn;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Tn; // ready for switch around
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Tn;
#endif
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Tn; // Stupid 3-struct
// Also need to update T_opp if it was not done already
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Tn;
};
#endif
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
if (TEST) {
printf("%d contrib %1.8E \n"
"pos_anti %1.9E %1.9E pos_out %1.9E %1.9E pos_clock %1.9E %1.9E\n", iVertex,
0.5*edge_normal.x*THIRD*(pos_anti.x + pos_clock.x
+ info.pos.x + info.pos.x + pos_out.x + pos_out.x),
pos_anti.x, pos_anti.y, pos_out.x, pos_out.y, pos_clock.x, pos_clock.y);
}
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
// How to detect? Loading a load of flags is a killer! We do need to load ... and this is why we should have not made info struct. Def not.
////
//if (insulator triangle)
//{
// centroid1 = THIRD*(pos_anti + pos_out + info.pos);
// // project to radius of insulator
// centroid1.project_to_radius(3.44);
// // Now dot with unit vectors:
// f64_vec2 tempvec2;
// tempvec2.x = unit_vec1.x*centroid1.x + unit_vec1.y*centroid1.y;
// tempvec2.y = unit_vec2.x*centroid1.x + unit_vec2.y*centroid1.y;
// centroid1.x = tempvec2.x;
// centroid1.y = tempvec2.y;
//} else {
// // centroid1 = THIRD*(pos_anti_twist + pos_out_twist);
// centroid1.x = THIRD*(
// unit_vec1.x*(pos_anti.x - info.pos.x) + unit_vec1.y*(pos_anti.y - info.pos.y)
// + unit_vec1.x*(pos_out.x - info.pos.x) + unit_vec1.y*(pos_out.y - info.pos.y)
// );
// centroid1.y = THIRD*(
// - unit_vec1.y*(pos_anti.x - info.pos.x) + unit_vec1.x*(pos_anti.y - info.pos.y)
// - unit_vec1.y*(pos_out.x - info.pos.x) + unit_vec1.x*(pos_out.y - info.pos.y)
// );
//}
//if (insulator triangle)
//{
// centroid2 = THIRD*(pos_clock + pos_out + info.pos);
// // project to radius of insulator
//} else {
//}
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((pos_clock.x*pos_clock.x + pos_clock.y*pos_clock.y <
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
||
(pos_anti.x*pos_anti.x + pos_anti.y*pos_anti.y <
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Ti;
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Ti;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Ti;
#endif
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Ti;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Ti;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Ti;
};
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
if (TEST) printf("%d iNeigh %d kappa_ion %1.8E nu %1.8E |o| %1.8E contrib %1.8E \n",
iVertex, iNeigh, kappa_parallel, nu,
omega.modulus(),
TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega))
);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifndef BWDSIDET
T_clock = T_outk;
T_outk = T_anti;
#else
T_clock = T_out;
T_out = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Te;
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Te;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Te;
}
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
if (TEST) {
printf("%d : %d endpt_anti %1.9E %1.9E SHARED endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
if (TEST) {
printf("%d : %d endpt_anti %1.9E %1.9E GLOBAL endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
// It decided to rotate something it shouldn't oughta. Rotated tri 23600 = tri 2 for 11582.
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
// f64 grad_out = (T_out - shared_T[threadIdx.x]) / delta_0out;
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
if (TEST) printf("izTri %d kappa_par %1.9E \n",
izTri[iNeigh], p_kappa_e[izTri[iNeigh]]);
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
// ourrates.NeTe += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// Expensive debug: remove!
if (TESTHEAT2) printf(
"iVertex %d iNeigh %d %d contribNeTe %1.9E edge_normal %1.8E %1.8E \n"
"T %1.9E Tout %1.9E T_anti %1.9E T_clock %1.9E\n"
" kappa_par %1.9E nu %1.9E |omega| %1.9E Area %1.9E\n"
"our_n %1.9E our n_n %1.9E nearby n %1.9E %1.9E\n"
"pos %1.8E %1.8E opp %1.8E %1.8E anti %1.8E %1.8E clock %1.8E %1.8E\n"
"omega %1.8E %1.8E grad_T %1.9E %1.9E \n"
"=================================================\n",
iVertex, iNeigh, indexneigh,
TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega)),
edge_normal.x, edge_normal.y, shared_T[threadIdx.x], T_out, T_anti, T_clock,
kappa_parallel, nu, sqrt(omega.dot(omega)),
p_AreaMajor[iVertex],
p_n_major[iVertex].n, p_n_major[iVertex].n_n, p_n_major[indexneigh].n, p_n_major[indexneigh].n_n,
info.pos.x, info.pos.y, pos_out.x, pos_out.y, pos_anti.x, pos_anti.y, pos_clock.x, pos_clock.y,
omega.x, omega.y, grad_T.x, grad_T.y);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}*/
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly_scalarT(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_T_n, f64 * __restrict__ p_T_i, f64 * __restrict__ p_T_e,
// T3 * __restrict__ p_T_k,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool * __restrict__ p_maskbool3,
bool * __restrict__ p_maskblock,
bool bUseMask)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only have 31 doubles in registers
// regardless of # of threads and space? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doubles' worth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
if (bUseMask) {
if (p_maskblock[blockIdx.x] == 0) return;
bMask[0] = p_maskbool3[iVertex];
bMask[1] = p_maskbool3[iVertex + NUMVERTICES];
bMask[2] = p_maskbool3[iVertex + NUMVERTICES*2];
//memcpy(bMask, p_maskbool3 + iVertex * 3, 3 * sizeof(bool));
}
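// Masking: p_maskblock lets whole blocks bail out early; bMask[0..2] (strided by NUMVERTICES)
// gate the per-species work -- bMask[0] gates the neutral pass below, and presumably bMask[1]
// and bMask[2] gate the ion and electron passes.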
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
f64 tempf64[2];
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p_T_n[iVertex];
} else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
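// For this first pass shared_T holds the neutral temperature Tn; shared_kappa was loaded
// with kappa_n above.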
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ( (info.flag == DOMAIN_VERTEX) && (
((bUseMask == 0) || (bMask[0] == true) || (bMask[1] == true) || (bMask[2] == true))))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
};
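// Neighbour indices, PBC flags and the triangle list are loaded once here; ourrates adds on to
// the existing d/dt N, NT stored in NTadditionrates.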
if ((bUseMask == 0) || (bMask[0] == true) )
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// Not sending blocks full of non-domain vertices is another idea. Fiddly with indices.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
T_clock = p_T_n[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
T_out = p_T_n[indexneigh]; // saved nothing here, only in loading
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
T_anti = p_T_n[indexneigh];
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//}; // So we are receiving 0 then doing this. But how come?
//Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
if (0) {
printf("%d contrib %1.8E \n"
"pos_anti %1.9E %1.9E pos_out %1.9E %1.9E pos_clock %1.9E %1.9E\n",
iVertex,
0.5*edge_normal.x*THIRD*(pos_anti.x + pos_clock.x
+ info.pos.x + info.pos.x + pos_out.x + pos_out.x),
pos_anti.x, pos_anti.y, pos_out.x, pos_out.y, pos_clock.x, pos_clock.y);
}
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
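// grad_T is the average gradient over the quadrilateral with corners info.pos, pos_clock,
// pos_out, pos_anti, obtained from Green's theorem:
//   grad T = (1/Area) * ( closed integral of T dy , - closed integral of T dx ),
// with the trapezoidal value 0.5*(T_a + T_b) on each side. Area_quadrilateral above uses the
// same vertex ordering, so the orientation sign cancels between numerator and denominator.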
grad_T.x = 0.5*( // (the minus sign is on grad_T.y below)
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
// This is correct, grad T in same coordinates as edge_normal...
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // mask
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
#pragma unroll
for (int iSpecies = 1; iSpecies < 3; iSpecies++)
{
if (iSpecies == 1)
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_i[iVertex];
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
}
else {
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_e[iVertex];
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
};
// Maybe this alone means combining the ion & electron code was stupid. Maybe it can't make contig access.
__syncthreads();
if ((bUseMask == 0) || (bMask[iSpecies] == true)) // either there is no masking, or this is switched on
{
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
if (iSpecies == 1) {
T_clock = p_T_i[indexneigh];
}
else {
T_clock = p_T_e[indexneigh];
};
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
if (iSpecies == 1) {
T_out = p_T_i[indexneigh];
}
else {
T_out = p_T_e[indexneigh];
};
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
if (iSpecies == 1)
{
T_anti = p_T_i[indexneigh];
}
else {
T_anti = p_T_e[indexneigh];
};
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
// if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
// }; // So we are receiving 0 then doing this. But how come?
// BUG -- masked stuff will go wrong.
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
};
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
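// Longitudinal reduction used below (assuming edge_normal runs along the line joining the two
// cell centres, as in the circumcenter construction this kernel relies on): the tensor above
// (nu_eHeart in the comment, nu in the code) is
//   kappa = kappa_par * (nu^2 I + omega omega^T - nu [omega]x) / (nu^2 + |omega|^2)
// restricted to xy, and with grad T ~ (T_out - T_self)/delta_out along that line the flux
// through the edge becomes
//   kappa_par * (T_out - T_self) * (nu^2 L^2 + (omega.n)^2) / (delta_out * L * (nu^2 + |omega|^2)),
// where n = edge_normal and L = |edge_normal|; the antisymmetric (Hall) part drops out because
// n.[omega]x.n = 0. The TWOTHIRDS factor below converts this to the NT rate.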
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
// We don't need test for T == 0 because we don't use anti or clock
// and we ruled out looking into insulator.
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
//if (iVertex == VERTCHOSEN) {
// printf("iVertex %d T_out T_our %1.10E %1.10E contrib %1.10E\n",
// iVertex, T_out, shared_T[threadIdx.x],
// TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
// (nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
// / (delta_out*edgelen *(nu * nu + omega.dot(omega)))
// );
//}
} else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
};
if ((TESTHEAT1) && (iSpecies == 2))
printf("%d iNeigh %d %d e factor %1.12E contrib %1.12E T_out %1.12E T_self %1.12E\n",
iVertex, iNeigh, indexneigh,
TWOTHIRDS * kappa_parallel *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
(T_out - shared_T[threadIdx.x])*
TWOTHIRDS * kappa_parallel *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
T_out, shared_T[threadIdx.x]
);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // end of extra scoping brace
}; // mask
__syncthreads();
};
if ((TESTHEAT1)) printf("%d ourrates.NeTe %1.10E \n", iVertex, ourrates.NeTe);
if ((info.flag == DOMAIN_VERTEX) && ((bUseMask == 0) || bMask[0] || bMask[1] || bMask[2]))
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates)); // write back only where ourrates was loaded above; it is uninitialized otherwise
// It was not necessarily sensible to combine ion and electron
// However, it is quite daft having a separate routine for vector2 grad T (??)
}
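// Hedged refactoring sketch, not called by anything in this file: the helper name and the idea
// of factoring this out are assumptions, but the body just reproduces the longitudinal flux
// factor that multiplies (T_out - T_self) in the magnetised ion/electron updates above,
//   TWOTHIRDS * kappa_par * (nu^2 L^2 + (omega.n)^2) / (delta_out * L * (nu^2 + |omega|^2)),
// so the duplicated ion/electron branches here and in the kernels below could in principle
// share one call.
__device__ __forceinline__ f64 LongitudinalHeatFluxFactor_sketch(
	f64 kappa_parallel, f64 nu, f64_vec3 omega, f64_vec2 edge_normal, f64 delta_out)
{
	f64 edgelen = edge_normal.modulus();
	// Same expression as the NiTi / NeTe accumulations in the kernel above:
	return TWOTHIRDS * kappa_parallel *
		(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal)) /
		(delta_out*edgelen*(nu*nu + omega.dot(omega)));
}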
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly_1species(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p__T,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p__kappa,
f64 * __restrict__ p__nu,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool * __restrict__ p_maskbool,
bool * __restrict__ p_maskblock,
bool bUseMask,
int iSpecies)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever]; // but as far as we know, we are having to use circumcenters.
// Maybe it works without them now that we have the longitudinal assumptions --- don't know for sure.
// But it means we are not being consistent with our definition of a cell?
// Like having major cells Voronoi => velocity living on centroids (which it must, for visc + A) is in slightly the wrong place.
__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only have 31 doubles in registry
// regardless # of threads and space? Or can be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask;
if (bUseMask)
if (p_maskblock[blockIdx.x] == 0) return;
if (bUseMask) bMask = p_maskbool[iVertex];
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
}
#endif
memcpy(&(shared_nu[threadIdx.x * 2]), p__nu + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_kappa[threadIdx.x * 2]), p__kappa + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p__T[iVertex];
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwords we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((info.flag == DOMAIN_VERTEX) && ((bUseMask == 0) || (bMask == true)))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
// The idea of not sending blocks full of non-domain vertices is another idea. Fiddly with indices.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
T_clock = p__T[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
T_out = p__T[indexneigh]; // saved nothing here, only in loading
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
T_anti = p__T[indexneigh];
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//}; // So we are receiving 0 then doing this. But how come?
//Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if (iSpecies == 0) {
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p__kappa[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p__kappa[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other iSpecies, make a subroutine.
grad_T.x = 0.5*( // (the minus sign is on grad_T.y below)
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
};
// This is correct, grad T in same coordinates as edge_normal...
}
else {
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p__kappa[izTri[iNeigh]];
nu = 0.5*p__nu[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p__kappa[izTri[iPrev]];
nu += 0.5*p__nu[izTri[iPrev]];
};
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
}
else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
if (TESTHEAT)
printf("%d %d iSpecies %d contrib %1.10E kappa_par %1.9E\nT_out %1.9E T %1.9E nu %1.9E omega %1.9E %1.9E\n", iVertex, iNeigh, iSpecies,
TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
kappa_parallel, T_out, shared_T[threadIdx.x], nu, omega.x, omega.y
);
};
}
}; // if iSpecies == 0
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // DOMAIN vertex active in mask
// Turned out to be stupid having a struct called NTrates. We just want to modify one scalar at a time.
if ((info.flag == DOMAIN_VERTEX) && ((bUseMask == 0) || (bMask == true)))
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates)); // write back only where ourrates was loaded above; it is uninitialized otherwise
}
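// Hedged usage sketch for the kernel above (assumptions: one thread per vertex, so a launch
// shape of NUMVERTICES / threadsPerTileMajorClever blocks of threadsPerTileMajorClever threads,
// matching iVertex = threadIdx.x + blockIdx.x*blockDim.x; every argument name below is a
// placeholder for whatever device array the caller actually owns):
//
//   kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly_1species
//       <<< NUMVERTICES / threadsPerTileMajorClever, threadsPerTileMajorClever >>> (
//           p_info_dev, p_indexneigh_dev, p_PBCneigh_dev, p_izTri_dev, p_szPBCtri_dev,
//           p_cc_dev, p_n_major_dev,
//           p_T_ion_dev,            // f64 per vertex: the species being advanced
//           p_B_major_dev,
//           p_kappa_ion_dev, p_nu_ion_dev,
//           p_NTrates_dev,          // accumulated in place
//           p_AreaMajor_dev,
//           p_maskbool_dev, p_maskblock_dev,
//           true,                   // bUseMask
//           1);                     // iSpecies: 0 = neutral, 1 = ion, 2 = electron
//   // followed by whatever launch-error check the caller normally uses.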
#include "heatflux.cu"
__global__ void kernelCalc_SelfCoefficient_for_HeatConduction
(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p_coeffself_n,
f64 * __restrict__ p_coeffself_i,
f64 * __restrict__ p_coeffself_e // outputs
)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only have 31 doubles in registry
// regardless # of threads and space? Or can be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
}
__syncthreads();
f64_vec2 grad_T;
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwords we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
memset(&ourrates, 0, sizeof(NTrates));
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// The idea of not sending blocks full of non-domain vertices is another idea. Fiddly with indices.
// Need this, we are adding on to existing d/dt N,NT :
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
// EXPERIMENT WHETHER IT IS FASTER WITH THESE OUTSIDE OR INSIDE THE BRANCH.
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(-1.0) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
//grad_T.x = 0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
grad_T.x = 0.5*(pos_clock.y - pos_anti.y) / Area_quadrilateral;
grad_T.y = -0.5*(pos_clock.x - pos_anti.x) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
// This is correct, grad T in same coordinates as edge_normal...
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
#pragma unroll
for (int iSpecies = 1; iSpecies < 3; iSpecies++)
{
if (iSpecies == 1)
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
else {
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
};
// Maybe this alone means combining the ion & electron code was stupid. Maybe it can't make contig access.
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
};
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (-1.0) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
} else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (-1.0) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
};
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
};
// Now compute self coeff from d/dself dNT/dt
nvals n_use = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
f64 Nn = n_use.n_n*AreaMajor;
f64 N = n_use.n *AreaMajor;
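// Worked form (assuming the sqrt(N)-scaled residual used elsewhere:
//   epsilon = sqrt(N)*T_self - sqrt(N)*T_k - (h/sqrt(N)) * dNT/dt):
//   d(epsilon)/dT_self = sqrt(N) - (h/sqrt(N)) * d(dNT/dt)/dT_self.
// The accumulation above substituted dT_self = 1, dT_neigh = 0 (hence the (-1.0) factors and
// the reduced grad_T), so ourrates now holds d(dNT/dt)/dT_self for each species.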
p_coeffself_n[iVertex] = sqrt(Nn) - (h_use / sqrt(Nn))*ourrates.NnTn; // ourrates is ROC epsilon wrt self
p_coeffself_i[iVertex] = sqrt(N) - (h_use / sqrt(N))*ourrates.NiTi; // ourrates is ROC epsilon wrt self
p_coeffself_e[iVertex] = sqrt(N) - (h_use / sqrt(N))*ourrates.NeTe; // ourrates is ROC epsilon wrt self
// NOTE BENE THAT WE UPDATED THIS IN VIEW OF:
// epsilon *= sqrt(N);
if ((iVertex == VERTCHOSEN) || (iVertex == VERTCHOSEN2))
printf("%d coeffself (1-h/N rates) %1.10E Rates %1.10E h/N %1.10E\n\n",
iVertex, p_coeffself_n[iVertex], ourrates.NnTn, h_use / N);
//if (iVertex == VERTCHOSEN) printf("iVertex %d coeffself_i %1.10E \n", iVertex, p_coeffself_i[iVertex]);
// ourrates is negative so this is > 1.
}
__global__ void kernelPowerminushalf
(f64 * __restrict__ p_input, f64 * __restrict__ p_output)
{
long const index = threadIdx.x + blockIdx.x*blockDim.x;
p_output[index] = 1.0 / sqrt(p_input[index]);
}
__global__ void kernelVolleyRegressors(
f64 * __restrict__ p_regress,
long const Length,
char * __restrict__ p_iVolley
) {
long const iVertex = threadIdx.x + blockDim.x*blockIdx.x;
// p_regress is regr+NUMVERTICES. That is the position of epsilon.
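// Output layout produced below (reading off the stores; "slot k" means p_regress[iVertex + k*Length]):
//   slots 0,1 : the two input regressors restricted to iVolley == 0
//   slots 2,3 : the same pair restricted to iVolley == 1
//   slots 4,5 : the same pair restricted to iVolley == 2
//   the iVolley > 2 parts (regr7, regr8) are computed but deliberately not stored, leaving
//   slots 6,7 untouched (see the note at the end of this kernel).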
char cVolley = p_iVolley[iVertex];
f64 regr1 = p_regress[iVertex];
f64 regr2 = p_regress[iVertex + Length];
f64 regr3 = regr1*((cVolley == 1)? 1 : 0);
f64 regr4 = regr2*((cVolley == 1) ? 1 : 0);
f64 regr5 = regr1*((cVolley == 2) ? 1 : 0);
f64 regr6 = regr2*((cVolley == 2)? 1 : 0);
f64 regr7 = regr1*((cVolley > 2)? 1 : 0);
f64 regr8 = regr2*((cVolley > 2) ? 1 : 0);
regr1 = regr1*((cVolley == 0) ? 1 : 0);
regr2 = regr2*((cVolley == 0) ? 1 : 0);
p_regress[iVertex] = regr1;
p_regress[iVertex + Length] = regr2;
p_regress[iVertex + 2 * Length] = regr3;
p_regress[iVertex + 3 * Length] = regr4;
p_regress[iVertex + 4 * Length] = regr5;
p_regress[iVertex + 5 * Length] = regr6;
// No thanks: leave 7 free
//p_regress[iVertex + 6 * Length] = regr7;
//p_regress[iVertex + 7 * Length] = regr8;
}
__global__ void kernelCreateEpsilonHeat
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_NT_n,
f64 * __restrict__ p_NT_i,
f64 * __restrict__ p_NT_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
NTrates * __restrict__ NTadditionrates, // it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ p_b_Failed,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
bool bMask[3];
if (bUseMask) {
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3); // until we break out species!
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2*NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
T3 T_k = p_T_k[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
f64 epsilon_n, epsilon_i, epsilon_e;
bool bFail = false;
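// Residual driven to zero here, in worked form (assuming N is treated as fixed over the
// substep, as the expressions below imply): backward Euler N*(T - T_k) = hsub * dNT/dt(T);
// dividing by sqrt(N) and writing the solver variable as x = sqrt(N)*T gives
//   epsilon = x - sqrt(N)*T_k - (hsub / sqrt(N)) * dNT/dt,
// which is evaluated per species below with x = p_NT_n / p_NT_i / p_NT_e.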
if ((bUseMask == 0) || bMask[0]) { // test bUseMask first: bMask is only loaded when bUseMask is set
f64 sqrtNn = sqrt(AreaMajor*n.n_n);
f64 NnTn = p_NT_n[iVertex]; // means sqrtN T
epsilon_n = NnTn - T_k.Tn*sqrtNn - (hsub / sqrtNn)*ourrates.NnTn;
if (epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NnTn*NnTn + 1.0e-10*1.0e-10)) bFail = true;
// Note that ourrates already included the factor 1/sqrtN on our own sqrt(N)T
} else {
epsilon_n = 0.0;
};
if ((bUseMask == 0) || bMask[1]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 NTi = p_NT_i[iVertex];
epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
if (epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NTi*NTi + 1.0e-10*1.0e-10)) bFail = true;
if (iVertex == VERTCHOSEN) printf("%d NTi %1.10E sqrtN Tk %1.10E hsub / sqrtN %1.10E NiTi %1.10E eps_i %1.10E sqrtN %1.10E\n",
iVertex, NTi, T_k.Ti*sqrtN, hsub / sqrtN, ourrates.NiTi, epsilon_i, sqrtN);
} else {
epsilon_i = 0.0;
};
if ((bUseMask == 0) || bMask[2]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 NTe = p_NT_e[iVertex]; // is this sqrtN T ?
epsilon_e = NTe - T_k.Te*sqrtN - (hsub / sqrtN)*ourrates.NeTe;
if (epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NTe*NTe + 1.0e-10*1.0e-10)) bFail = true;
} else {
epsilon_e = 0.0;
};
if (epsilon_n != epsilon_n)printf("epsilon_n NaN iVertex %d n_n %1.10E Area %1.10E \n",
iVertex, n.n_n, AreaMajor);
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if (p_b_Failed != 0) {
if (bFail)
p_b_Failed[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
};
} else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
};
}
__global__ void kernelSelectivelyZeroNTrates(
NTrates * __restrict__ NTadditionrates,
bool * __restrict__ p_bMask3
) {
long const iVertex = threadIdx.x + blockIdx.x*blockDim.x;
bool bMask[3];
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2 * NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
NTrates dNTbydt;
memcpy(&dNTbydt, &(NTadditionrates[iVertex]), sizeof(NTrates));
if (bMask[0]) dNTbydt.NnTn = 0.0;
if (bMask[1]) dNTbydt.NiTi = 0.0;
if (bMask[2]) dNTbydt.NeTe = 0.0;
memcpy(&(NTadditionrates[iVertex]), &dNTbydt, sizeof(NTrates));
}
__global__ void kernelCreateEpsilonHeat_Equilibrated
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_sqrtDNT_n,
f64 * __restrict__ p_sqrtDNT_i,
f64 * __restrict__ p_sqrtDNT_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_invsqrtD_n,
f64 * __restrict__ p_invsqrtD_i,
f64 * __restrict__ p_invsqrtD_e,
NTrates * __restrict__ NTadditionrates, // it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ p_b_Failed,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
bool bMask[3];
if (bUseMask) {
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2*NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
T3 T_k = p_T_k[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
f64 epsilon_n, epsilon_i, epsilon_e;
bool bFail = false;
if ((bUseMask == 0) || (bMask[0])) { // test bUseMask first: bMask is only loaded when bUseMask is set
f64 sqrtNn = sqrt(AreaMajor*n.n_n);
f64 sqrtDN_T = p_sqrtDNT_n[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_n[iVertex];
// epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
// Multiply epsilon by D^-1/2 and
// wherever a sqrt(DN)T appears multiply it by D_j^-1/2 to give sqrt(N)T
// The multiplication D_j^-1/2 was already included in T -> ourrates
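// In worked form, with y = sqrt(D*N)*T the stored variable and sqrtDinv = D^{-1/2}:
//   epsilon = sqrtDinv * ( sqrtDinv*y - sqrt(N)*T_k - (hsub/sqrt(N)) * dNT/dt ),
// i.e. the sqrt(N)-scaled residual preconditioned on both sides by D^{-1/2}.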
epsilon_n = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtNn)*ourrates.NnTn
- sqrtDinv*T_k.Tn*sqrtNn;
f64 test_epsilon = epsilon_n / sqrtDinv; // divides take long.
f64 sqrtNn_Tn = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtNn_Tn*sqrtNn_Tn + 1.0e-10*1.0e-10)) bFail = true;
// Let's be careful about that threshold. It's for N T^2.
// sqrt(N) that we care about ~ 1e4. T that we care about ~ 1e-14. We then go REL_THRESH*that.
// Note that ourrates already included the factor 1/sqrtN on our own sqrt(N)T
} else {
epsilon_n = 0.0;
};
if ((bUseMask == 0) || (bMask[1])) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 sqrtDN_T = p_sqrtDNT_i[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_i[iVertex];
epsilon_i = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtN)*ourrates.NiTi
- sqrtDinv*T_k.Ti*sqrtN;
//epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
f64 test_epsilon = epsilon_i / sqrtDinv;
f64 sqrtN_Ti = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtN_Ti*sqrtN_Ti + 1.0e-10*1.0e-10)) bFail = true;
}
else {
epsilon_i = 0.0;
};
if ((bUseMask == 0) || (bMask[2])) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 sqrtDN_T = p_sqrtDNT_e[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_e[iVertex];
epsilon_e = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtN)*ourrates.NeTe
- sqrtDinv*T_k.Te*sqrtN;
// epsilon_e = NTe - T_k.Te*sqrtN - (hsub / sqrtN)*ourrates.NeTe;
f64 test_epsilon = epsilon_e / sqrtDinv;
f64 sqrtN_Te = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtN_Te*sqrtN_Te + 1.0e-10*1.0e-10)) bFail = true;
}
else {
epsilon_e = 0.0;
};
// if (TEST) printf("%d epsilon_e %1.8E NTe %1.8E nete %1.8E\n",
// iVertex, epsilon_e, NTe, ourrates.NeTe);
if (epsilon_n != epsilon_n)printf("epsilon_n NaN iVertex %d n_n %1.10E Area %1.10E \n",
iVertex, n.n_n, AreaMajor);
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if (p_b_Failed != 0) {
if (bFail)
p_b_Failed[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
};
}
else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
};
}
__global__ void kernelCreateEpsilonHeatOriginalScaling
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
NTrates * __restrict__ NTadditionrates ,// it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ bTest
)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
f64 Tn = p_T_n[iVertex];
f64 Ti = p_T_i[iVertex];
f64 Te = p_T_e[iVertex];
T3 T_k = p_T_k[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 Nn = (AreaMajor*n.n_n);
f64 epsilon_n = Tn - T_k.Tn - (hsub / Nn)*ourrates.NnTn;
f64 N = (AreaMajor*n.n);
f64 epsilon_i = Ti - T_k.Ti - (hsub / N)*ourrates.NiTi;
f64 epsilon_e = Te - T_k.Te - (hsub / N)*ourrates.NeTe;
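// Note: multiplying epsilon_n by sqrt(Nn) (and epsilon_i, epsilon_e by sqrt(N)) recovers the
// sqrt(N)-scaled residuals used in kernelCreateEpsilonHeat above, since
//   sqrt(N)*(T - T_k - (h/N)*rate) = sqrt(N)*T - sqrt(N)*T_k - (h/sqrt(N))*rate.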
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if ((epsilon_n*epsilon_n > 1.0e-24*(Tn*Tn + 1.0e-14*1.0e-14))
|| (epsilon_i*epsilon_i > 1.0e-24*(Ti*Ti + 1.0e-14*1.0e-14))
|| (epsilon_e*epsilon_e > 1.0e-24*(Te*Te + 1.0e-14*1.0e-14))
)
bTest[blockIdx.x] = true;
}
else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
}
}
__global__ void kernelAccumulateDiffusiveHeatRate_new_Full(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_major,
//T3 * __restrict__ p_T_putative,
bool * __restrict__ p_bool_longi,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool bCheckWhetherToDoctorUp,
bool * __restrict__ p_maskbool3,
bool * __restrict__ p_maskblock,
bool bUseMask
//T3 * __restrict__ p_T_putative
) // test whether we are pushing heat uphill...
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- unsure how likely that is -- do we only get 31 doubles in registers
// regardless of thread count and space? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doubles' worth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
if (bUseMask)
if (p_maskblock[blockIdx.x] == 0) return;
if (bUseMask) {
//memcpy(bMask, p_maskbool3 + iVertex * 3, 3 * sizeof(bool));
bMask[0] = p_maskbool3[iVertex];
bMask[1] = p_maskbool3[iVertex + NUMVERTICES];
bMask[2] = p_maskbool3[iVertex + NUMVERTICES*2];
}
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
f64 tempf64[2];
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
if (TESTHEATFULL) printf("iVertex %d : B_major[iVertex] %1.10E %1.10E \n^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&\n",
iVertex, p_B_major[iVertex].x, p_B_major[iVertex].y);
shared_T[threadIdx.x] = p_T_major[iVertex].Tn;
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((bUseMask == 0) || (bMask[0] == true) || (bMask[1] == true) || (bMask[2] == true))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
}
if ((bUseMask == 0) || (bMask[0] == true)) // either there is no masking, or this is switched on
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// // [ Ignore flux into edge of outermost vertex I guess ???]
// long index0 = Indexneigh[MAXNEIGH_d * threadIdx.x + 0];
// long index1 = Indexneigh[MAXNEIGH_d * threadIdx.x + 1];
// long index2 = Indexneigh[MAXNEIGH_d * threadIdx.x + 2];
// long index3 = Indexneigh[MAXNEIGH_d * threadIdx.x + 3];
// long index4 = Indexneigh[MAXNEIGH_d * threadIdx.x + 4];
// printf("DEBUG: iVertex %d info.neigh_len %d izNeigh %d %d %d %d \n"
// "flags 0 %d %d %d %d \n"
// "positions (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// index0, index1, index2, index3,
// p_info_minor[index0+BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index1 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index2 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index3 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index0 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index0 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index1 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index1 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index2 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index2 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index3 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index3 + BEGINNING_OF_CENTRAL].pos.y
// );
} else {
if (info.flag == DOMAIN_VERTEX) {
// Not sending blocks full of non-domain vertices is another possible optimization; fiddly with indices though.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
} else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Tn;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Tn;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Tn;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Tn; // ready for switch around
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Tn;
#endif
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Tn; // Stupid 3-struct
// Also need to update T_opp if it was not done already
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Tn;
};
#endif
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
// How to detect? Loading a pile of flags is a killer! We do need to load ... and this is why we should not have made the info struct. Definitely not.
////
//if (insulator triangle)
//{
// centroid1 = THIRD*(pos_anti + pos_out + info.pos);
// // project to radius of insulator
// centroid1.project_to_radius(3.44);
// // Now dot with unit vectors:
// f64_vec2 tempvec2;
// tempvec2.x = unit_vec1.x*centroid1.x + unit_vec1.y*centroid1.y;
// tempvec2.y = unit_vec2.x*centroid1.x + unit_vec2.y*centroid1.y;
// centroid1.x = tempvec2.x;
// centroid1.y = tempvec2.y;
//} else {
// // centroid1 = THIRD*(pos_anti_twist + pos_out_twist);
// centroid1.x = THIRD*(
// unit_vec1.x*(pos_anti.x - info.pos.x) + unit_vec1.y*(pos_anti.y - info.pos.y)
// + unit_vec1.x*(pos_out.x - info.pos.x) + unit_vec1.y*(pos_out.y - info.pos.y)
// );
// centroid1.y = THIRD*(
// - unit_vec1.y*(pos_anti.x - info.pos.x) + unit_vec1.x*(pos_anti.y - info.pos.y)
// - unit_vec1.y*(pos_out.x - info.pos.x) + unit_vec1.x*(pos_out.y - info.pos.y)
// );
//}
//if (insulator triangle)
//{
// centroid2 = THIRD*(pos_clock + pos_out + info.pos);
// // project to radius of insulator
//} else {
//}
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
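// Longitudinal (unmagnetized) fallback when a flanking point lies outside the domain:
//   d(Nn Tn)/dt += (2/3) * kappa * edge_length * (T_out - T_ours) / |pos_out - pos_ours|.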
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
if ((T_clock == 0.0) || (T_anti == 0.0)) {
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
} else {
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
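// grad_T above is the usual planar Green's-theorem estimate over the quadrilateral
// (ours, clock, out, anti):  grad T ~ (1/A) * contour integral of T n_hat dl,
// with T on each side taken as the average of the two endpoint values.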
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
};
// This is correct, grad T in same coordinates as edge_normal...
};
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
};
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Ti;
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((bUseMask == 0) || (bMask[1] == true)) // either there is no masking, or this is switched on
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Ti;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Ti;
#endif
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Ti;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Ti;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Ti;
};
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// Mimic
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
} else {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
} else {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
} else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
// Use longitudinal:
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
ourrates.NiTi += long_contrib;
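// long_contrib is (2/3)*kappa_eff*(T_out - T_ours)*edgelen/delta_out, where the effective
// conductivity is reduced by the magnetisation factor
//   (nu^2*edgelen^2 + (omega.edge_normal)^2) / (edgelen^2*(nu^2 + |omega|^2)),
// so that mainly the field-aligned part of the conduction survives when |omega| >> nu.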
} else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
grad_T.x = 0.5*(
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
f64 contrib = TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega));
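// contrib dots the magnetised conductivity tensor into grad T: in the xy plane the tensor is
//   kappa_par * (nu^2 I + omega_xy omega_xy^T + nu*omega_z*R90) / (nu^2 + |omega|^2),
// R90 being a 90-degree rotation, matching the commented kappa.xx..yy expressions above.
// In the unmagnetised limit nu >> |omega| this reduces to iso_contrib below.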
// Rule 1. Not a greater flow than isotropic
// Rule 2. Not the opposite direction to isotropic - minimum zero
f64 iso_contrib = TWOTHIRDS * kappa_parallel *(edge_normal.x*grad_T.x + edge_normal.y*grad_T.y);
if (contrib > 0.0) {
if ((iso_contrib > 0.0) && (contrib > iso_contrib)) contrib = iso_contrib;
if (iso_contrib < 0.0) contrib = 0.0;
} else {
if ((iso_contrib < 0.0) && (contrib < iso_contrib)) contrib = iso_contrib;
if (iso_contrib > 0.0) contrib = 0.0;
}
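// Net effect of the two rules: contrib is essentially clamped to have the same sign as the
// isotropic flow and never to exceed it in magnitude.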
//
// if (TESTHEATFULL) printf("%d iNeigh %d kappa_ion %1.8E nu %1.8E |o| %1.8E contrib %1.8E \n",
// iVertex, iNeigh, kappa_parallel, nu,
// omega.modulus(),
// TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega))
// );
//
if (bCheckWhetherToDoctorUp) {
// Now ask if this flow is going uphill:
bool b_out = p_bool_longi[indexneigh * 2];
bool b_here = p_bool_longi[iVertex * 2]; // 2 random reads --- we could put bools into shared easily
if (b_out || b_here) {
// use longitudinal flows
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// printf("ION %d : %d T T_out %1.8E %1.8E T_put T_putout %1.8E %1.8E cont %1.9E long %1.9E\n",
// iVertex, indexneigh, shared_T[threadIdx.x], T_out, T_here2, Tout2, contrib, long_contrib);
// if (((T_here2 < Tout2) && (contrib < 0.0)) || ((T_here2 > Tout2) && (contrib > 0.0))) {
// Either we are less but shrinking or more but growing
contrib = long_contrib;
};
};
ourrates.NiTi += contrib;
}; // scoping brace
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifndef BWDSIDET
T_clock = T_outk;
T_outk = T_anti;
#else
T_clock = T_out;
T_out = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // mask
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Te;
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((bUseMask) && (bMask[2] == 0)) return;
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Te;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Te;
}
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// mimic
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
if (0) {
printf("%d : %d endpt_anti %1.9E %1.9E SHARED endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
if (0) {
printf("%d : %d endpt_anti %1.9E %1.9E GLOBAL endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
// Debug note: it rotated something it should not have. Rotated tri 23600 = tri 2 for 11582.
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
// Use longitudinal:
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
ourrates.NeTe += long_contrib;
if (TESTHEATFULL) printf("iVertex %d iNeigh %d long_contrib %1.14E T_out %1.9E ours %1.9E kappa_par %1.9E factor %1.9E\n",
iVertex, iNeigh, long_contrib,
T_out, shared_T[threadIdx.x], kappa_parallel,
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (edgelen*(nu * nu + omega.dot(omega))));
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
grad_T.x = 0.5*(
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
f64 contrib = TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega));
// Rule 1. Not a greater flow than isotropic
// Rule 2. Not the opposite direction to isotropic - minimum zero
f64 iso_contrib = TWOTHIRDS * kappa_parallel *(edge_normal.x*grad_T.x + edge_normal.y*grad_T.y);
if (TESTHEATFULL) printf(
"iVertex %d iNeigh %d contrib %1.9E iso_contrib %1.9E \n"
"edge_normal %1.8E %1.8E \n"
"T %1.9E Tout %1.9E T_anti %1.9E T_clock %1.9E\n"
" kappa_par %1.9E nu %1.9E |omega| %1.9E Area %1.9E\n"
"our_n %1.9E our n_n %1.9E nearby n %1.9E %1.9E\n"
"pos %1.8E %1.8E opp %1.8E %1.8E anti %1.8E %1.8E clock %1.8E %1.8E\n"
"omega %1.8E %1.8E grad_T %1.9E %1.9E \n"
"=================================================\n"
, iVertex, iNeigh,
contrib, iso_contrib,
edge_normal.x, edge_normal.y, shared_T[threadIdx.x], T_out, T_anti, T_clock,
kappa_parallel, nu, sqrt(omega.dot(omega)),
p_AreaMajor[iVertex],
p_n_major[iVertex].n, p_n_major[iVertex].n_n, p_n_major[indexneigh].n, p_n_major[indexneigh].n_n,
info.pos.x, info.pos.y, pos_out.x, pos_out.y, pos_anti.x, pos_anti.y, pos_clock.x, pos_clock.y,
omega.x, omega.y, grad_T.x, grad_T.y);
if (TESTHEATFULL) printf("shared B[threadIdx.x] %1.10E %1.10E B_out %1.10E %1.10E\n",
shared_B[threadIdx.x].x, shared_B[threadIdx.x].y, B_out.x, B_out.y);
if (contrib > 0.0) {
if ((iso_contrib > 0.0) && (contrib > iso_contrib)) contrib = iso_contrib;
if (iso_contrib < 0.0) contrib = 0.0;
}
else {
if ((iso_contrib < 0.0) && (contrib < iso_contrib)) contrib = iso_contrib;
if (iso_contrib > 0.0) contrib = 0.0;
}
if (bCheckWhetherToDoctorUp) {
// Now ask if this flow is going uphill:
bool b_out = p_bool_longi[indexneigh * 2 + 1];
bool b_here = p_bool_longi[iVertex * 2 + 1];
if (b_out || b_here) {
// use longitudinal flows
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// printf("ELEC %d : %d T T_out %1.8E %1.8E T_put T_putout %1.8E %1.8E cont %1.9E long %1.9E\n",
// iVertex, indexneigh, shared_T[threadIdx.x], T_out, T_here2, Tout2, contrib, long_contrib);
// if (((T_here2 < Tout2) && (contrib < 0.0)) || ((T_here2 > Tout2) && (contrib > 0.0))) {
// Either we are less but shrinking or more but growing
contrib = long_contrib;
if (TESTHEATFULL) printf("contrib = long contrib %1.14E \n", contrib);
};
};
if (TESTHEATFULL) printf("iVertex %d ourrates.NeTe before: %1.14E contrib %1.12E\n", iVertex, ourrates.NeTe, contrib);
ourrates.NeTe += contrib;
if (TESTHEATFULL) printf("iVertex %d ourrates.NeTe after: %1.14E \n", iVertex, ourrates.NeTe);
}
};
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
};
}
__global__ void kernelCreatePutativeT(
f64 hsub,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_k,
// T3 * __restrict__ p_T_putative,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
bool * __restrict__ p_boolarray, // 2x NMAJOR
bool * __restrict__ p_bFailedtest,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskBlock, // do 1 for all species
bool bUseMask
)
{
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : %d %d \n", iVertex,
(bUseMask) ? 1 : 0, (p_bMaskBlock[blockIdx.x]) ? 1 : 0);
if ((bUseMask) && (p_bMaskBlock[blockIdx.x] == 0)) return;
bool bMask[3];
if (bUseMask) {
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2 * NUMVERTICES];
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : %d %d \n", iVertex,
(bMask[1]) ? 1 : 0, (bMask[2]) ? 1 : 0);
if ((bMask[1] == 0) && (bMask[2] == 0)) return; // we do nothing here with neutrals
};
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
T3 T_k = p_T_k[iVertex];
nvals n = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
NTrates NT = NTadditionrates[iVertex];
T3 T_put;
// T_put.Tn = T_k.Tn + hsub* NT.NeTe / (n.n_n*AreaMajor); // serves no purpose...
T_put.Ti = T_k.Ti + hsub*NT.NiTi / (n.n*AreaMajor);
T_put.Te = T_k.Te + hsub*NT.NeTe / (n.n*AreaMajor);
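// Forward-Euler putative temperatures: T_put = T_k + hsub * d(N T)/dt / N, with N = n*AreaMajor.
// They are only used below to detect whether the explicit step would drive T negative.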
//if (iVertex == VERTCHOSEN) printf("%d T_e_k %1.8E NeTe %1.8E N %1.8E T_put %1.8E\n",
// iVertex, T_k.Te, NT.NeTe, (n.n*AreaMajor), T_put.Te);
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : T_put.Te %1.10E NeTe %1.10E \n", iVertex,
T_put.Te, NT.NeTe);
bool ourbools[2];
bool bAlert = false;
memcpy(ourbools, p_boolarray + 2 * iVertex, sizeof(bool) * 2);
//if (iVertex == VERTCHOSEN) printf("%d Te_putative %1.10E NT.NeTe %1.10E ourbool %d bAlert %d\n", iVertex, T_put.Te, NT.NeTe,
// ourbools[1]?1:0, bAlert?1:0);
if (((bUseMask == 0) || (bMask[1] == true)) && (T_put.Ti < 0.0)) {
if (ourbools[0] == 0) bAlert = true;
ourbools[0] = true;
};
if (((bUseMask == 0) || (bMask[2] == true)) && (T_put.Te < 0.0)) {
if (ourbools[1] == 0) bAlert = true;
ourbools[1] = true;
};
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d :ourbools[1] %d \n", iVertex,
(ourbools[1]) ? 1 : 0);
memcpy(p_boolarray + 2 * iVertex, ourbools, sizeof(bool) * 2);
if (bAlert) p_bFailedtest[blockIdx.x] = true;
}
__global__ void kernelReturnNumberNegativeT(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T,
long * __restrict__ p_sum
)
{
__shared__ long sum[threadsPerTileMajorClever];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
sum[threadIdx.x] = 0;
if (info.flag == DOMAIN_VERTEX) {
T3 T = p_T[iVertex];
if ((T.Tn < 0.0) || (T.Ti < 0.0) || (T.Te < 0.0))
{
printf("iVertex %d T %1.8E %1.8E %1.8E flag %d pos %1.10E %1.10E\n", iVertex, T.Tn, T.Ti, T.Te, info.flag,
info.pos.x, info.pos.y);
sum[threadIdx.x] = 1;
// Really does find only 1 -- 19498 Ti. Could spit out more about
// why it happened.
// So is there an alternative?
}
// Worth it? Could we more simply just blitz the out-of-domain T to 0 and load it?
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum[threadIdx.x] += sum[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum[threadIdx.x] += sum[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum[blockIdx.x] = sum[0];
};
}
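/*
// Host-side usage sketch for the per-block sums produced above (an assumption, not the
// original calling code): copy p_sum back and total it on the CPU. numBlocks is whatever
// grid size the kernel was launched with.
long SumNegativeTCounts(long *p_sum_dev, int numBlocks)
{
	long *p_sum_host = new long[numBlocks];
	cudaMemcpy(p_sum_host, p_sum_dev, numBlocks * sizeof(long), cudaMemcpyDeviceToHost);
	long total = 0;
	for (int i = 0; i < numBlocks; i++) total += p_sum_host[i];
	delete[] p_sum_host;
	return total;
}
*/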
/*
__global__ void kernelSetNeighboursBwd(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izNeigh_vert,
bool * __restrict__ p_bMask3)
{
Won't work because it needs separate src and dest memory.
__shared__ bool bMask[threadsPerTileMajorClever][3];
long const iVertex = threadIdx.x + blockIdx.x*blockDim.x;
memcpy(bMask[threadIdx.x], p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
// check row-major meaning.
__syncthreads();
bool bMask3[3], bMaskNeigh[3];
bMask3[0] = bMask[threadIdx.x][0];
bMask3[1] = bMask[threadIdx.x][1];
bMask3[2] = bMask[threadIdx.x][2];
structural info = p_info_minor[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
long izNeigh[MAXNEIGH_d];
memcpy(izNeigh, p_izNeigh_vert + MAXNEIGH_d*iVertex, sizeof(long)*MAXNEIGH_d);
#pragma unroll MAXNEIGH_d
for (int i = 0; (i < info.neigh_len); i++)
{
long indexneigh = izNeigh[i];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
memcpy(bMaskNeigh, bMask[indexneigh - StartMajor], sizeof(bool) * 3);
} else {
memcpy(bMaskNeigh, p_bMask3 + 3 * indexneigh, sizeof(bool) * 3);
};
if (bMaskNeigh[0]) bMask3[0] = true;
if (bMaskNeigh[1]) bMask3[1] = true;
if (bMaskNeigh[2]) bMask3[2] = true;
};
if ((bMaskNeigh[0]) || (bMaskNeigh[1]) || (bMaskNeigh[2])) {
memcpy(p_bMask3 + 3 * iVertex, bMask, sizeof(bool) * 3);
// otherwise, it was 0 to start with; let it still be 0.
};
}
}
*/
__global__ void kernelSetBlockMaskFlag_CountEquations_reset_Tk(
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskBlock,
long * __restrict__ p_longblock3,
T3 * __restrict__ p_T_k,
T3 * __restrict__ p_T
)
{
__shared__ bool bAlert[3];
__shared__ long sum0[threadsPerTileMajorClever];
__shared__ long sum1[threadsPerTileMajorClever];
__shared__ long sum2[threadsPerTileMajorClever]; // need to save all 3 values
if (threadIdx.x < 3)
bAlert[threadIdx.x] = false;
sum0[threadIdx.x] = 0;
sum1[threadIdx.x] = 0;
sum2[threadIdx.x] = 0;
__syncthreads();
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES*2];
if (bMask[0]) {
bAlert[0] = true;
sum0[threadIdx.x]++;
};
if (bMask[1]) {
bAlert[1] = true;
sum1[threadIdx.x]++;
printf("Ion: %d\n", iVertex);
};
if (bMask[2]) {
bAlert[2] = true; // maybe this does not work.
sum2[threadIdx.x]++;
printf("Elec: %d | ", iVertex);
};
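// Multiple threads may set the same bAlert[] entry to true concurrently; since every such
// write stores the same value and the reads happen after __syncthreads(), this is benign.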
if ((bMask[0]) || (bMask[1]) || (bMask[2]))
{
T3 T = p_T[iVertex];
T3 Tk = p_T_k[iVertex];
if (bMask[0]) T.Tn = Tk.Tn;
if (bMask[1]) T.Ti = Tk.Ti;
if (bMask[2]) T.Te = Tk.Te;
p_T[iVertex] = T;
}
__syncthreads();
// if (iVertex == VERTCHOSEN) printf(" %d bAlert %d %d %d \n",
// iVertex, bAlert[0] ? 1 : 0, bAlert[1] ? 1 : 0, bAlert[2] ? 1 : 0);
if (threadIdx.x == 0) {
p_bMaskBlock[blockIdx.x] = (bAlert[0] || bAlert[1] || bAlert[2]);
}
// All this work, yet we want to split into per-species solves anyway.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum0[threadIdx.x] += sum0[threadIdx.x + k];
sum1[threadIdx.x] += sum1[threadIdx.x + k];
sum2[threadIdx.x] += sum2[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum0[threadIdx.x] += sum0[threadIdx.x + s - 1];
sum1[threadIdx.x] += sum1[threadIdx.x + s - 1];
sum2[threadIdx.x] += sum2[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_longblock3[blockIdx.x*3] = sum0[0];
p_longblock3[blockIdx.x*3+1] = sum1[0];
p_longblock3[blockIdx.x*3+2] = sum2[0];
};
}
__global__ void kernelCompareForStability_andSetFlag(
structural * __restrict__ p_info_minor,
NTrates * __restrict__ p_NTrates1,
NTrates * __restrict__ p_NTrates2,
long * __restrict__ p_sum,
bool * __restrict__ p_bMask3
)
{
__shared__ long sum[threadsPerTileMajorClever];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
sum[threadIdx.x] = 0;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
if (info.flag == DOMAIN_VERTEX) {
NTrates dNTdt1 = p_NTrates1[iVertex];
NTrates dNTdt2 = p_NTrates2[iVertex];
//memcpy(bMask, p_bMask3 + iVertex * 3, sizeof(bool) * 3);
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES * 2];
// we want to check if 2 is greater magnitude than 1 and reversed sign
if ((dNTdt2.NnTn*dNTdt1.NnTn < 0.0)
&& (fabs(dNTdt2.NnTn) > fabs(dNTdt1.NnTn)))
{
sum[threadIdx.x]++;
bMask[0] = 1;
}
if ((dNTdt2.NiTi*dNTdt1.NiTi < 0.0)
&& (fabs(dNTdt2.NiTi) > fabs(dNTdt1.NiTi))) {
sum[threadIdx.x]++;
bMask[1] = 1;
}
if ((dNTdt2.NeTe*dNTdt1.NeTe < 0.0)
&& (fabs(dNTdt2.NeTe) > fabs(dNTdt1.NeTe))) {
sum[threadIdx.x]++;
bMask[2] = 1;
};
p_bMask3[iVertex] = bMask[0];
p_bMask3[iVertex + NUMVERTICES] = bMask[1];
p_bMask3[iVertex + NUMVERTICES * 2] = bMask[2];
};
// non domain mask flags already set to 0
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum[threadIdx.x] += sum[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum[threadIdx.x] += sum[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum[blockIdx.x] = sum[0];
};
}
__global__ void kernelCreatePutativeTandsave(
f64 hsub,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_k,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
T3 * __restrict__ p_T_dest,
bool * bMask3
)
{
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
bool bMask[3];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
T3 T_k = p_T_k[iVertex];
nvals n = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
NTrates NT = NTadditionrates[iVertex];
T3 T_put;
T_put.Tn = T_k.Tn + hsub*NT.NnTn / (n.n_n*AreaMajor); // neutral species uses NnTn
T_put.Ti = T_k.Ti + hsub*NT.NiTi / (n.n*AreaMajor);
T_put.Te = T_k.Te + hsub*NT.NeTe / (n.n*AreaMajor);
p_T_dest[iVertex] = T_put;
memset(bMask, 0, sizeof(bool) * 3);
if (T_put.Tn < 0.0) bMask[0] = 1;
if (T_put.Ti < 0.0) bMask[1] = 1;
if (T_put.Te < 0.0) bMask[2] = 1;
} else {
memset(bMask, 0, sizeof(bool) * 3);
}
if (iVertex == 22351) printf("22351 info.flag %d bMask %d %d %d \n",
info.flag, (bMask[0] ? 1 : 0), (bMask[1] ? 1 : 0), (bMask[2] ? 1 : 0));
bMask3[iVertex] = bMask[0];
bMask3[iVertex + NUMVERTICES] = bMask[1];
bMask3[iVertex + 2 * NUMVERTICES] = bMask[2];
//memcpy(bMask3 + iVertex * 3, bMask, sizeof(bool) * 3);
}
__global__ void kernelIonisationRates_Forward_Euler(
f64 const h_use,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_major,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// We are in major cells so actually output this to a fresh temp array (9 scalars)
// which we then share out into minor cells.
v4 * __restrict__ p_v,
f64_vec3 * __restrict__ p_v_n,
T3 * __restrict__ p_T_use_major,
bool b_useTuse
)
// ** SIMPLIFIED VERSION **
{
#define SAFETY_FACTOR 1.2
#define LEEWAY 1.0e-23
#define vAC 218687393.0 // Alfven Critical velocity = sqrt(13.6*1.6e-12*2/me)
long const iVertex = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
NTrates ourrates;
f64_vec3 MAR_neut, MAR_ion, MAR_elec;
v4 v;
f64_vec3 v_n;
f64 T_use;
if (info.flag == DOMAIN_VERTEX)
{
// case DOMAIN_VERTEX:
f64 lambda;
f64 AreaMajor = p_AreaMajor[iVertex];
T3 T_k = p_T_major[iVertex];
if (b_useTuse) {
T3 T = p_T_use_major[iVertex];
T_use = T.Te;
}
else {
T_use = T_k.Te;
}
nvals our_n = p_n_major[iVertex];
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(&MAR_neut, p_MAR_neut + iVertex, sizeof(f64_vec3)); // are we passing stuff from central then?
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3)); // it does mean d/dt (Nv)
memcpy(&v, p_v + iVertex, sizeof(v4));
memcpy(&v_n, p_v_n + iVertex, sizeof(f64_vec3));
// 0 . What is lambda?
f64 oldT1;
f64 n_k = our_n.n;
f64 n_n_k = our_n.n_n;
f64 n_kplus1, n_n_kplus1, n_kplus2;
f64 Gamma_ion, Gamma_rec, hn, hnn, Delta_ionise, Delta_rec;
// lambda = 0.5*reduced mass*w0.dot(w0) / T_k.Te;
f64 w0z = v.vez - v_n.z;
// What is capital Theta of T_k ?
//f64 w = sqrt(w0z*w0z); // WE ARE ONLY USING Z DIMENSION FOR ABSORBING KINETIC ENERGY
// Check again: how did we come up with the following formulas?
// Off of the lambda spreadsheet or the v spreadsheet? I think lambda.
f64 T_use_theta = T_k.Te;
if (T_use_theta < 1.0e-12) T_use_theta = 1.0e-12;
f64 Theta = (1.1 + 0.4e-12 / T_use_theta);
if (w0z < vAC - 0.4e-4 / T_use_theta) {
//Theta *= exp(-w*(vC - 0.4e-4 / T_use_theta - w)*1.0e-12
// / (0.25*(vC - 0.4e-4 / T_use_theta)*(vC - 0.4e-4 / T_use_theta)*T_use_theta));
// Multiply through to save on divisions?:
Theta *= exp(-w0z*((vAC - w0z)* T_use_theta - 0.4e-4)*1.0e-12 /
(0.25*(vAC* T_use_theta - 0.4e-4)*(vAC* T_use_theta - 0.4e-4)));
};
// Available KE:
f64 Kconv = 0.5*m_e*m_n*n_k*n_n_k*(w0z*w0z) / (m_e*n_k + m_n*n_n_k);
f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// Now compute f(Tk) = T_k+1 given using T_k
f64 w = sqrt(0.5*(w0z*w0z + (v.vxy.x - v_n.x)*(v.vxy.x - v_n.x) + (v.vxy.y - v_n.y)*(v.vxy.y - v_n.y))); // CORRECTION FACTOR 0.5 ...
f64 T_image1, T2, T_image2, T_oldimage1, Tkplus2minus1;
hn = h_use*n_k;
hnn = h_use*n_k*n_k;
f64 T1 = T_use; // first go. = Tk if b_useTuse == false.
{
Gamma_ion = GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
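// Delta_ionise / Delta_rec above appear to be the solution of a semi-implicit 2x2 linear
// system for the ionised and recombined number increments over the step,
//   Delta_ionise ~ h*n_n*n*Gamma_ion,  Delta_rec ~ h*n^2*Gamma_rec  (both taken at k+1),
// with determinant (1 + hn*Gamma_ion)*(1 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion.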
T2 = T_image1;
// Skip over algorithm:
if (Delta_ionise != Delta_ionise) printf("Nandelta %d Tuse %1.10E w %1.8E Gamma %1.10E rec %1.10E w0z %1.10E Kconv %1.10E\n", iVertex, T_use, w, Gamma_ion, Gamma_rec, w0z, Kconv);
//if (iVertex == 16700) printf("Delta_ionise %1.10E rec %1.10E \n", Delta_ionise, Delta_rec);
//f64 TeeV = T1/ kB;
//f64 Tesq = TeeV*TeeV;
//f64 Te3 = TeeV*Tesq;
//f64 Te4 = Tesq*Tesq;
//f64 calc1 = (ionize_coeffs[0][0][4] + ionize_coeffs[0][0][3] * TeeV
// + ionize_coeffs[0][0][2] * Tesq + ionize_coeffs[0][0][1] * Te3
// + ionize_coeffs[0][0][0] * Te4);
//f64 calc2 = (ionize_coeffs[0][0][0] + ionize_coeffs[0][0][1] * TeeV
// + ionize_coeffs[0][0][2] * Tesq + ionize_coeffs[0][0][3] * Te3
// + ionize_coeffs[0][0][4] * Te4);
//if (iVertex == 16700) printf("ionize_coeffs[0][0] %1.12E %1.12E %1.12E %1.12E %1.12E \n"
// "TeeV %1.12E calc1 %1.12E calc2 %1.12E exp(calc1) %1.12E exp(calc2) %1.12E\n",
// ionize_coeffs[0][0][0], ionize_coeffs[0][0][1], ionize_coeffs[0][0][2], ionize_coeffs[0][0][3], ionize_coeffs[0][0][4],
// TeeV, calc1, calc2, exp(calc1), exp(calc2));
f64 dNdt_ionise = AreaMajor*Delta_ionise / h_use;
f64 dNdt_recombine = AreaMajor*Delta_rec / h_use;
ourrates.N += dNdt_ionise - dNdt_recombine;
ourrates.Nn += dNdt_recombine - dNdt_ionise;
// Store existing energy density:
f64 Energy_k = 1.5*(n_k*(T_k.Te + T_k.Ti) + n_n_k*T_k.Tn) +
0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n));
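// Energy_k is the total energy density at step k: thermal 1.5*(n*(Te+Ti) + n_n*Tn) plus
// kinetic 0.5*sum_s m_s n_s v_s^2 (electrons and ions share vxy, with separate z components).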
// 1. Calculate kinetic energy absorption impact on vez, vnz
// ie Ionization resistance to current
n_kplus1 = n_k + Delta_ionise - Delta_rec;
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
// Absorbed DKE:
f64 deltaKE = -(2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv))*Delta_ionise*13.6*kB;
f64 new_vz_diff = sqrt(m_e*n_kplus1 + m_n*n_n_kplus1*
((n_k*n_n_k / (m_e*n_k + m_n*n_n_k))*(w0z*w0z) + 2.0*deltaKE / (m_e*m_n)) /
n_kplus1*n_n_kplus1);
f64 delta_vez = m_n*n_n_kplus1*(w0z + new_vz_diff) /
(m_n*n_n_kplus1 + m_e*n_kplus1);
f64 delta_vnz = -m_e*n_kplus1*delta_vez / (m_n*n_n_kplus1);
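// By construction m_e*n_kplus1*delta_vez + m_n*n_n_kplus1*delta_vnz = 0, so the z-momentum
// exchanged between electrons and neutrals here sums to zero.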
// Check: w0 = vez-vnz - tick
// should change to scalar.
MAR_neut.z += AreaMajor*n_n_kplus1*delta_vnz / h_use;
MAR_elec.z += AreaMajor*n_kplus1*delta_vez / h_use;
f64_vec3 ve_kplus1, vi_kplus1, vn_kplus1;
// Store alongside: v_k+1 so that we can follow the anticipated change in energy,
// to create energy balance:
ve_kplus1.x = v.vxy.x*(n_k / n_kplus1);
ve_kplus1.y = v.vxy.y*(n_k / n_kplus1);
ve_kplus1.z = v.vez*(n_k / n_kplus1) + delta_vez; // we need to store v, we could also store nv if we wanted.
vi_kplus1.x = v.vxy.x*(n_k / n_kplus1);
vi_kplus1.y = v.vxy.y*(n_k / n_kplus1);
vi_kplus1.z = v.viz*(n_k / n_kplus1);
vn_kplus1 = v_n*(n_n_k / n_n_kplus1);
vn_kplus1.z += delta_vnz;
// 2. Add the effect of xfers on momenta:
// What does MAR_neut mean? Nv?
{
f64_vec3 v_use;
v_use.x = v.vxy.x;
v_use.y = v.vxy.y;
v_use.z = (m_e*v.vez + m_i*v.viz) / (m_e + m_i);
MAR_neut += -dNdt_ionise*v_n + dNdt_recombine*v_use;
MAR_ion += dNdt_ionise*v_n - dNdt_recombine*v_use;
MAR_elec += dNdt_ionise*v_n - dNdt_recombine*v_use;
vn_kplus1 -= (Delta_ionise*v_n - Delta_rec*v_use) / n_n_kplus1;
// n_k+1 v_k+1 = n_k v_k + Delta_n*v_use => v_k+1 = (n_k/n_k+1) v_k + (Delta_n/n_k+1) v_use
vi_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
ve_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
}
if (MAR_elec.z != MAR_elec.z) printf("ivertex %d MAR_elec nan\n", iVertex);
// . Ionization cooling & recombination heating
//f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// ourrates.NeTe +=
// dNdt_recombine*2.0*13.6*kB / 3.0
// - (2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv))*dNdt_ionise;
// We can drop this: it will be accounted for by the final energy balance.
// 3. Add to nT for x-fers due to species converting
ourrates.NiTi += 0.5*dNdt_ionise*T_k.Tn;
ourrates.NeTe += 0.5*dNdt_ionise*T_k.Tn;
ourrates.NnTn -= dNdt_ionise*T_k.Tn;
f64 nTe_kplus1 = T_k.Te*(n_k)+0.5*Delta_ionise*T_k.Tn;
f64 nTi_kplus1 = T_k.Ti*(n_k)+0.5*Delta_ionise*T_k.Tn;
f64 n_nTn_kplus1 = T_k.Tn*(n_n_k)-Delta_ionise*T_k.Tn;
// 4. Energy balance through Te:
// Maybe we should rather be seeking OVERALL energy balance where KE_result is from n_k+1, v_k+1
// and we ensure that we have lost the right amount of energy overall.
// That is the better way:
f64 KE_result = 0.5*(m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + m_i*n_kplus1*vi_kplus1.dot(vi_kplus1)
+ m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1));
f64 Energy_density_kplus1 = KE_result + 1.5*(nTe_kplus1 + nTi_kplus1 + n_nTn_kplus1);
f64 Energy_density_target = Energy_k - 13.6*kB*(Delta_ionise - Delta_rec);
// Additional_heat = (KE_k + deltaKE) - KE_result; // usually positive
// 1*1+3*3 > 2*2 + 2*2 so KE is generally decreasing by friction; KE_result < KE_k+deltaKE
// KE_result + Added_heat + existing heat = desired total energy = KE_k + heat_k + deltaKE
// 1.5 nT += Frictional_heating
// NTe += (2/3) Area Frictional_heating
ourrates.NeTe += 2.0*AreaMajor*
(Energy_density_target - Energy_density_kplus1) / (3.0*h_use);
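// Energy balance: the target energy density is Energy_k minus 13.6*kB per net ionisation, and
// the whole discrepancy with the k+1 bookkeeping above is dumped into the electron heat
// equation; the factor 2*AreaMajor/(3*h_use) converts an energy-density deficit into d(N*Te)/dt.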
// All this stuff is wrong - see full routine.
// DEBUG:
if (TEST_IONIZE) printf("iVertex %d n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E h*NeTe %1.9E \n"
"Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
"Delta_ionise %1.9E rec %1.9E \n",
iVertex, n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
Delta_ionise, Delta_rec
);
// DEBUG:
if (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use < 0.0)
printf("%d Predicted Te %1.9E \n", iVertex, (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use)/(n_k*AreaMajor));
// DEBUG:
if ((ourrates.NeTe != ourrates.NeTe)) printf("Nan NeTe %d \n", iVertex);
if (MAR_elec.z != MAR_elec.z) printf("Nan MAR_elec.z %d \n", iVertex);
if (MAR_elec.x != MAR_elec.x) printf("Nan MAR_elec.x %d \n", iVertex);
if (MAR_neut.x != MAR_neut.x) printf("Nan MAR_neut.x %d \n", iVertex);
if (MAR_ion.y != MAR_ion.y) printf("Nan MAR_ion.y %d \n", iVertex);
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
memcpy(p_MAR_neut + iVertex, &MAR_neut, sizeof(f64_vec3));
memcpy(p_MAR_ion + iVertex, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex, &MAR_elec, sizeof(f64_vec3));
//******************************************************************************************************
//// f64 TeV = T.Te * one_over_kB;
//// We loaded in ourrates.NT which indicates the new heat available so we should include some of that.
//// The main impact will be from heat conduction; dN/dt due to advection neglected here.
//f64 TeV = one_over_kB * (T.Te*our_n.n*AreaMajor + h_use*ourrates.NeTe)/
// (our_n.n*AreaMajor + h_use*ourrates.N);
//// Should be very careful here: ourrates.NeTe can soak to neutrals on timescale what? 1e-11?
//if (TeV < 0.0) {
// printf("\n\niVertex %d : ourrates.N %1.14E denominator %1.14E \n"
// " AreaMajor %1.14E TeV %1.14E ourrates.NeTe %1.10E h %1.10E \n"
// "ourrates.Nn %1.10E n %1.10E n_n %1.10E Te %1.10E Tn %1.10E \n\n",
// iVertex, ourrates.N,
// (our_n.n*AreaMajor + h_use*ourrates.N),
// AreaMajor, TeV, ourrates.NeTe, h_use,
// ourrates.Nn, our_n.n, our_n.n_n, T.Te, T.Tn);
//
//}
//f64 sqrtT = sqrt(TeV);
//f64 temp = 1.0e-5*exp(-13.6 / TeV) / (13.6*(6.0*13.6 + TeV)); // = S / T^1/2
// // Let h n n_n S be the ionising amount,
// // h n S is the proportion of neutrals! Make sure we do not run out!
////f64 hnS = (h_use*our_n.n*TeV*temp) / (sqrtT + h_use * our_n.n_n*temp*SIXTH*13.6);
// // d/dt (sqrtT) = 1/2 dT/dt T^-1/2.
// // dT[eV]/dt = -TWOTHIRDS * 13.6* n_n* sqrtT *temp
// // d/dt (sqrtT) = -THIRD*13.6*n_n*temp;
//// kind of midpoint, see SIXTH not THIRD:
//f64 Model_of_T_to_half = TeV / (sqrtT + h_use*SIXTH*13.6*our_n.n_n*temp / (1.0 - h_use*(our_n.n_n - our_n.n)*temp*sqrtT));
//f64 hS = h_use*temp*Model_of_T_to_half;
//
//// NEW:
//f64 ionise_rate = AreaMajor * our_n.n_n * our_n.n*hS /
// (h_use*(1.0 + hS*(our_n.n-our_n.n_n))); // dN/dt
//ourrates.N += ionise_rate;
//ourrates.Nn += -ionise_rate;
//// Let nR be the recombining amount, R is the proportion.
//TeV = T.Te * one_over_kB;
//f64 Ttothe5point5 = sqrtT * TeV * TeV*TeV * TeV*TeV;
//f64 hR = h_use * (our_n.n * our_n.n*8.75e-27*TeV) /
// (Ttothe5point5 + h_use * 2.25*TWOTHIRDS*13.6*our_n.n*our_n.n*8.75e-27);
//// T/T^5.5 = T^-4.5
//// T/(T^5.5+eps) < T^-4.5
//// For some reason I picked 2.25 = 4.5/2 instead of 5.5/2.
//// But basically it looks reasonable.
//// Maybe the additional stuff is an estimate of the change in T[eV]^5.5??
//// d/dt T^5.5 = 5.5 T^4.5 dT/dt
//// dT/dt = TWOTHIRDS * 13.6*( hR / h_use) = TWOTHIRDS * 13.6*( n^2 8.75e-27 T^-4.5)
//// d/dt T^5.5 = 5.5 TWOTHIRDS * 13.6*( n^2 8.75e-27 )
//f64 recomb_rate = AreaMajor * our_n.n * hR / h_use; // could reasonably again take hR/(1+hR) for n_k+1
//ourrates.N -= recomb_rate;
//ourrates.Nn += recomb_rate;
//if (TEST) printf("%d recomb rate %1.10E ionise_rate %1.10E our_n.n %1.10E nn %1.10E hR %1.10E hS %1.10E\n"
// "h_use %1.8E sqrtTeV %1.10E Ttothe5point5 %1.9E Te %1.9E modelThalf %1.9E\n", iVertex,
// recomb_rate, ionise_rate, our_n.n, our_n.n_n, hR, hS, h_use, sqrtT, Ttothe5point5, T.Te, Model_of_T_to_half);
//ourrates.NeTe += -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate;
//ourrates.NiTi += 0.5*T.Tn*ionise_rate;
//ourrates.NnTn += (T.Te + T.Ti)*recomb_rate;
//if (TEST) {
// printf("kernelIonisation %d NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "due to I+R : NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "d/dtNeTe/N %1.9E d/dtNiTi/N %1.9E d/dtNnTn/Nn %1.9E \n\n",
// iVertex, ourrates.NeTe, ourrates.NiTi, ourrates.NnTn,
// -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate,
// 0.5*T.Tn*ionise_rate,
// (T.Te + T.Ti)*recomb_rate,
// ourrates.NeTe / (our_n.n*AreaMajor), ourrates.NiTi / (our_n.n*AreaMajor), ourrates.NnTn / (our_n.n_n*AreaMajor));
//};
//memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
};
}
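// ---------------------------------------------------------------------------------------------------
// Illustrative sketch only, nothing below calls it: the closed forms for Delta_ionise / Delta_rec that
// appear repeatedly in kernelIonisationRates are the exact solution of the coupled semi-implicit pair
// Delta_i = (h n_k Gamma_ion) * (n_n_k - Delta_i + Delta_r)
// Delta_r = (h n_k^2 Gamma_rec) * (n_k + Delta_i - Delta_r)
// i.e. each rate coefficient is evaluated at step k but acts on the updated density of the species it
// consumes. The helper name and signature are hypothetical; the kernel keeps its hand-expanded version.
__host__ __device__ inline void SolveImplicitIonisationDeltas_sketch(
double hnGi, // h * n_k * Gamma_ion
double hnnGr, // h * n_k * n_k * Gamma_rec
double n_k, double n_n_k,
double * pDelta_ionise, double * pDelta_rec)
{
double denom = (1.0 + hnGi)*(1.0 + hnnGr) - hnnGr*hnGi;
*pDelta_ionise = (n_n_k*hnGi + (n_n_k + n_k)*hnnGr*hnGi) / denom;
*pDelta_rec = (n_k*hnnGr + (n_k + n_n_k)*hnnGr*hnGi) / denom;
// Then n_kplus1 = n_k + Delta_ionise - Delta_rec and n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec,
// matching the expressions in the kernel below.
}
// ---------------------------------------------------------------------------------------------------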
__global__ void kernelIonisationRates(
f64 const h_use,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_major,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// We are in major cells so actually output this to a fresh temp array (9 scalars)
// which we then share out into minor cells.
v4 * __restrict__ p_v,
f64_vec3 * __restrict__ p_v_n,
T3 * __restrict__ p_T_use_major,
bool b_useTuse
)
{
#define SAFETY_FACTOR 1.2
#define LEEWAY 1.0e-23
#define vAC 218687393.0 // Alfven Critical velocity = sqrt(13.6*1.6e-12*2/me)
long const iVertex = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
NTrates ourrates;
f64_vec3 MAR_neut, MAR_ion, MAR_elec;
v4 v;
f64_vec3 v_n;
f64 T_use;
bool bZero_out = false;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
// case DOMAIN_VERTEX:
f64 lambda;
f64 AreaMajor = p_AreaMajor[iVertex];
T3 T_k = p_T_major[iVertex];
if (b_useTuse) {
T3 T = p_T_use_major[iVertex];
T_use = T.Te;
} else {
T_use = T_k.Te;
}
nvals our_n = p_n_major[iVertex];
f64 fac_uplift = ArtificialUpliftFactor(our_n.n, our_n.n_n);
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(&MAR_neut, p_MAR_neut + iVertex, sizeof(f64_vec3)); // are we passing stuff from central then?
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3)); // it does mean d/dt (Nv)
memcpy(&v, p_v + iVertex, sizeof(v4));
memcpy(&v_n, p_v_n + iVertex, sizeof(f64_vec3));
if (TEST_IONIZE) printf("iVertex %d ourrates.NeTe original %1.10E \n", iVertex, ourrates.NeTe);
// 0 . What is lambda?
f64 oldT1;
f64 n_k = our_n.n;
f64 n_n_k = our_n.n_n;
f64 n_kplus1, n_n_kplus1, n_kplus2;
f64 Gamma_ion, Gamma_rec, hn, hnn, Delta_ionise, Delta_rec;
// lambda = 0.5*reduced mass*w0.dot(w0) / T_k.Te;
f64 w0z = v.vez - v_n.z;
// What is capital Theta of T_k ?
//f64 w = sqrt(w0z*w0z); // WE ARE ONLY USING Z DIMENSION FOR ABSORBING KINETIC ENERGY
// Check again: how did we come up with the following formulas?
// Off of the lambda spreadsheet or the v spreadsheet? I think lambda.
f64 T_use_theta = T_k.Te;
if (T_use_theta < 1.0e-12) T_use_theta = 1.0e-12;
f64 Theta = (1.1 + 0.4e-12 / T_use_theta);
if (w0z < vAC - 0.4e-4 / T_use_theta) {
//Theta *= exp(-w*(vC - 0.4e-4 / T_use_theta - w)*1.0e-12
// / (0.25*(vC - 0.4e-4 / T_use_theta)*(vC - 0.4e-4 / T_use_theta)*T_use_theta));
// Multiply through to save on divisions?:
Theta *= exp(-w0z*((vAC - w0z)* T_use_theta - 0.4e-4)*1.0e-12 /
(0.25*(vAC* T_use_theta - 0.4e-4)*(vAC* T_use_theta - 0.4e-4)));
};
// Available KE:
f64 Kconv = 0.5*m_e*m_n*n_k*n_n_k*(w0z*w0z) / (m_e*n_k + m_n*n_n_k);
if (TEST_IONIZE) printf("iVertex %d w0z %1.10E Kconv %1.10E Theta %1.9E \n", iVertex,
w0z, Kconv, Theta);
f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// Now compute f(Tk) = T_k+1 given using T_k
f64 w = sqrt(0.5*(w0z*w0z + (v.vxy.x - v_n.x)*(v.vxy.x - v_n.x) + (v.vxy.y - v_n.y)*(v.vxy.y - v_n.y))); // CORRECTION FACTOR 0.5 ...
// ================
// Made a mistake and saved data for v that is sqrt(2) times greater by missing 0.5 out of lambda
// so
// data for "1e7" is actually for 1.4e7. Thus pass 1/sqrt(2) times our velocity
f64 T_image1, T2, T_image2, T_oldimage1, Tkplus2minus1;
hn = h_use*n_k;
hnn = h_use*n_k*n_k;
f64 T1 = T_use; // first go. = Tk if b_useTuse == false.
{
// if (TEST_IONIZE) {
// Gamma_ion = GetIonizationRatesDebug(T1, w, &Gamma_rec);
// } else {
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
// };
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
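// These closed forms are the exact solution of the coupled semi-implicit pair
// Delta_ionise = hn*Gamma_ion * (n_n_k - Delta_ionise + Delta_rec),
// Delta_rec = hnn*Gamma_rec * (n_k + Delta_ionise - Delta_rec),
// so each transfer is evaluated against the updated density of the species it consumes
// (see the sketch above this kernel).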
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec)/ n_kplus1;
}
T2 = T_image1;
int sign_k = (T2 - T1 > 0.0) ? 1 : -1; // usually -1 , ie negative rate of change, net ionization
// Torigmove = T2 - T1; -- no, it's T_image-T_k that we wanna use.
if (TEST_IONIZE) printf("iVertex %d original move T2 %1.9E T1 %1.9E \n", iVertex, T2, T1);
// X
// it's ok to use sign_k for the sign of the T_use move
// because if it's different sign to fwd move we never do overshooting test
// But what about if it's T<0? so fwd is recombining but new shift of T
// brings T_k+1<0.
// In that case we should be detecting it right here.
// First check if fwd next temperature would be negative:
bool bAccept = false;
// Try allowing to access the b_test loop:
bool b_test = b_useTuse;
// check that this brings back the 77 - it doesn't
if ((T2 < 0.0) && (b_useTuse) && (T_use > T_k.Te))
{
// in this case we should switch to T1 = T_k:
T1 = T_k.Te; // switch the outer T1 to Tk; declaring a fresh f64 here would only shadow it inside this block.
{
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
T2 = T_image1;
sign_k = (T2 - T1 > 0.0) ? 1 : -1;
if (TEST_IONIZE) printf("iVertex %d switch to Tk \n", iVertex);
// X
// and turn off tests below involving assn of move?
b_test = false;
}
// DEBUG 2 -- it worked with this bit and b_test cut out
if (T2 < 0.0) {
while (T2 < 0.0) {
oldT1 = T1;
T1 *= 0.5;
T_oldimage1 = T_image1; // save
// Compute image of T1:
{
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
T2 = T_image1;
if (TEST_IONIZE) printf("iVertex %d T<0 loop: T2 %1.9E \n", iVertex, T2);
// X
};
// 3(a) If 2^-i Tk is an acceptable point, accept it.
if (T_image1 - T1 > 0.0) // T is now rising -- we crossed T_bwd from T_k
{ // Note: bwd criterion: T_image(use T_use from T_k) element (0, fwd image of Tk)
// 3.(c) If 2^-i T_k is lower than a bwd step, proceed to main loop with
// 2^-i T_k as T_far and 2^-(i-1) T_k as T_near:
T2 = T1; // "left point" (right if we were ascending)
T1 *= 2.0; // "right point" -- may be T_k itself
bAccept = false;
T_image2 = T_image1; // image of the halved T, which is now T2
T_image1 = T_oldimage1; // image of the doubled-back T, which is now T1
if (TEST_IONIZE) printf("iVertex %d T now rising: T1 %1.9E T2 %1.9E \n", iVertex, T1, T2);
// X
} else {
// Test T1 for overshooting:
// T2 is already defined as image of T1
Gamma_ion = fac_uplift*GetIonizationRates(T2, w, &Gamma_rec);
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
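// Tkplus2minus1 estimates the further change of T over a second explicit step: a dilution term
// (n_kplus1/n_kplus2 - 1)*T2 plus the ionisation cooling and recombination heating terms. It is used
// below as the overshoot comparator against the move already made from T_k.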
// compare this inflated difference of Tnext from T2
// with the difference T2-Tk :
if (Tkplus2minus1 < 0.0)// same sign as move from Tk to T2
{
bAccept = true; // accept this move Delta(T1)
} else {
// Test that the reversed magnitude is smaller.
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te) + LEEWAY);
};
if (bAccept == false) {
// Overshooting:
if (TEST_IONIZE) printf("iVertex %d T overshooting 1\n", iVertex);
// Y
// No adjustment to T1, T2 needed.
// Compute image of T2 under f_k:
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
T_image2 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
// We want this defined when we enter secant loop.
};
};
} else { // fwd T, or first step with T_use, didn't go below zero
// 4. If we are so close to equilibrium that the proposed
// change in temperature is tiny like 10^{-9}T then just set
// the actual ionization to 0. We have only 10^{-6}/10^{-13}=10^{7} steps.
// Changed factor to 1.0e-10
if (fabs(T_image1 - T_k.Te) < 1.0e-10*T_k.Te) {
// do nothing for a tiny move:
Delta_ionise = 0.0;
Delta_rec = 0.0;
bAccept = true;
if (TEST_IONIZE) printf("iVertex %d small move accepted\n", iVertex);
// Y
bZero_out = true;
// To move 10% would take 1e8 moves, we have only 1e-6/1e-13 = 1e7.
} else {
if (TEST_IONIZE) printf("%d b_test %d \n",iVertex, (b_test ? 1 : 0));
// Y
if (b_test) {
// in this case we now want to check whether our move
// is the same sign as the T_k move.
// We can check the sign_k just by evaluating ionization rates at T_k
// No, we pretty much need to work out which one is winning out.
Gamma_ion = fac_uplift*GetIonizationRates(T_k.Te, w, &Gamma_rec);
f64 Delta_ionise_k = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
f64 Delta_rec_k = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise_k - Delta_rec_k; // Delta_rec is amount recombining.
f64 T_image_k = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise_k + TWOTHIRDS*13.6*kB*Delta_rec_k) / n_kplus1; // use the deltas evaluated at T_k, consistent with n_kplus1 on the previous line
if (TEST_IONIZE) printf("iVertex %d bTest was true; T_image_k %1.9E T_k %1.9E T_image1 %1.9E T1 %1.9E\n",
iVertex, T_image_k, T_k.Te, T_image1, T1);
// Y
if (((T_image_k > T_k.Te) && (T_image1 < T1))
||
((T_image_k < T_k.Te) && (T_image1 > T1)))
{
// different sign:
// If it's different sign, either accept it if the move
// brings us the f(Tk) side of Tk, or set it to 0
// if the move would take us the opposite direction from Tk.
bAccept = true;
// We have not changed Delta_ionize
if (((T_image_k > T_k.Te) && (T_image1 < T_k.Te))
||
((T_image_k < T_k.Te) && (T_image1 > T_k.Te)))
{
Delta_ionise = 0.0;
Delta_rec = 0.0;
// A better solution may exist.
if (TEST_IONIZE) printf("iVertex %d setted Delta_ionise to 0\n", iVertex);
// Z
};
} else {
// If it's the same sign, pass to the following code
// which asks if it is overshooting.
// We have not changed T2 or T1 or T_image1
if (TEST_IONIZE) printf("iVertex %d pass to secant loop\n", iVertex);
// Z
};
};
// putative Fwd Euler move neither had T < 0 nor was tiny.
// Overshooting test for Fwd Euler:
if (TEST_IONIZE) printf("iVertex %d overshooting test for Fwd move\n", iVertex);
// Z
bAccept = false;
Gamma_ion = fac_uplift*GetIonizationRates(T2, w, &Gamma_rec);
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
// Comparator:
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
// compare this inflated difference of Tnext from T2 with the difference T2-T1:
if (((Tkplus2minus1 > 0.0) && (sign_k > 0))
||
((Tkplus2minus1 < 0.0) && (sign_k < 0)))
{
bAccept = true; // Accept forward Euler move; Delta was set.
// Or on main step, accept "T_k+1/2" move
if (TEST_IONIZE) printf("iVertex %d comparator same sign; accept\n",iVertex);
// Z
} else {
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te)+LEEWAY);
// Accept only if the comparator is smaller in magnitude.
if (TEST_IONIZE) printf("iVertex %d comparison %1.10E vs %1.10E\n", iVertex,
SAFETY_FACTOR*fabs(Tkplus2minus1) , fabs(T2 - T_k.Te) + LEEWAY);
// Z
};
// got rid of probs by commenting from here
if (bAccept == false) {
// construct f_k image of T2 for use in secant:
f64 hnGamma_ion = h_use*Gamma_ion*n_k;
f64 hnnGamma_rec = h_use*Gamma_rec*n_k*n_k;
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
T_image2 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
}; // whether small move
}; // whether fwd T < 0
// Main loop:
int ctr = 0;
while ((bAccept == false) && (ctr < 100)){
++ctr;
// max 100 iterations but I don't see any reason hard limit will be needed.
// Calculate secant from existing points:
// We have T1, T2 coming in
// T1 is the one closer to T_k, T2 is the other side of bwd T
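// The next line is the secant estimate of the fixed point T = T_image(T): writing g(T) = T_image(T) - T,
// the secant through (T1, g(T1)) and (T2, g(T2)) crosses zero at
// T_sec = (T1*T_image2 - T2*T_image1) / (T_image2 - T2 - T_image1 + T1),
// which is the expression below up to an overall sign in numerator and denominator.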
f64 T_sec = (T2*T_image1 - T1*T_image2) / (T2 - T_image2 - T1 + T_image1);
// This approximates a backward step.
// Try 'midpoint': we want to be on fwd side of bwd step
f64 T_est = 0.5*(T_sec + T1);
// Calculate image starting from T_k and using T_est
//if (TEST_IONIZE) {
//Gamma_ion = GetIonizationRatesDebug(T_est, w, &Gamma_rec);
//} else {
Gamma_ion = fac_uplift*GetIonizationRates(T_est, w, &Gamma_rec);
//};
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
// *** Set for the move we are testing ***
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
f64 T_image_est = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
if (TEST_IONIZE) printf("iVertex %d secant loop T1 %1.9E T2 %1.9E T_est %1.9E T_image_est %1.9E Gamma_ion %1.9E Gamma_rec %1.9E\n",
iVertex, T1, T2, T_est, T_image_est, Gamma_ion, Gamma_rec);
// A - worked with
bAccept = false;
if ((T_image_est < 0.0) && (sign_k < 0)) // If T goes negative it does count as overshooting, supposing original dT was decreasing.
{
// Can only get here if non-monotonic?
// overshooting:
T1 = T_est;
T_image1 = T_image_est;
bAccept = false;
} else {
// between bwd & fwd: T_image_est - T_est between 0 and Torigmove.
if (
((sign_k > 0) && (T_image_est - T_est < 0.0))
|| // going wrong way starting from T_k: therefore beyond bwd
((sign_k < 0) && (T_image_est - T_est > 0.0))
)
{
T2 = T_est;
T_image2 = T_image_est;
bAccept = false;
if (TEST_IONIZE) printf("iVertex %d : T_est beyond bwd\n", iVertex);
// A - works with
// This is beyond bwd but do we need to check that we are not exceeding a fwd.
// .. If we got here then the fwd step is overshooting so that greater move would presumably be overshooting also.
// We now defined sign_k based on the requested T's move
// Therefore can we say that
// 1. T2 is on the opposite side of T_use from T_k
// 2. Moving back towards T_use will achieve the same sign as T_use
// ... it is possible that we were given T_use the other side of bwd so then
// we are stuck? can it be the other side of bwd, overshoot T_k away from itself ---
// we need to address separate cases.
} else {
// Test overshooting:
bAccept = false;
if (TEST_IONIZE) printf("iVertex %d : T_est overshooting test\n", iVertex);
// A - works with
// T_image_est is > 0 if we got here.
Gamma_ion = fac_uplift*GetIonizationRates(T_image_est, w, &Gamma_rec);
// Comparator:
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
// Comparator:
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
// compare this inflated difference of Tnext from T2 with the difference T2-T1:
if (((Tkplus2minus1 > 0.0) && (T2-T_k.Te > 0.0))
||
((Tkplus2minus1 < 0.0) && (T2-T_k.Te < 0.0)))
{
bAccept = true; // same sign onward => not overshooting eqm
if (TEST_IONIZE) printf("iVertex %d comparator same sign\n", iVertex);
// A
} else {
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te)+LEEWAY);
if (TEST_IONIZE) printf("iVertex %d comparison %1.10E vs %1.10E\n",
SAFETY_FACTOR*fabs(Tkplus2minus1), fabs(T2 - T_k.Te) + LEEWAY);
// A
// Accept only if the comparator is smaller in magnitude.
if (bAccept == false) {
T1 = T_est; // still overshooting
T_image1 = T_image_est;
};
};
};
};
}; // end while: loop exits when bAccept == true or ctr reaches 100
// Now calculate what to do, given this move:
// ==========================================
// Aim in the above:
// Delta_ionise and delta_rec should be already set.
// This bit is not perfect. It breaks down the changes into steps.
// 0. What is ROC of N:
if (bZero_out) {
// do nothing to MAR_elec, ourrates etc
} else {
f64 dNdt_ionise = AreaMajor*Delta_ionise / h_use;
f64 dNdt_recombine = AreaMajor*Delta_rec / h_use;
ourrates.N += dNdt_ionise - dNdt_recombine;
ourrates.Nn += dNdt_recombine - dNdt_ionise;
if (TEST_IONIZE) printf("Delta_ionise %1.10E Delta_rec %1.10E ourrates.N %1.10E \n",
Delta_ionise, Delta_rec, ourrates.N);
// Store existing energy density:
// f64 Energy_k = 1.5*(n_k*(T_k.Te + T_k.Ti) + n_n_k*T_k.Tn) +
// 0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n));
// 1. Calculate kinetic energy absorption impact on vez, vnz
// ie Ionization resistance to current
n_kplus1 = n_k + Delta_ionise - Delta_rec;
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
// Absorbed DKE:
f64 deltaKE = -(2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv))*Delta_ionise*13.6*kB;
f64 safe_argument = (m_e*n_kplus1 + m_n*n_n_kplus1)*
((n_k*n_n_k / (m_e*n_k + m_n*n_n_k))*(w0z*w0z) + 2.0*deltaKE / (m_e*m_n)) /
(n_kplus1*n_n_kplus1);
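// safe_argument is the square of the new relative z-velocity w', chosen so that the reduced-mass KE
// evaluated with the k+1 densities, 0.5*m_e*m_n*n_kplus1*n_n_kplus1*w'*w' / (m_e*n_kplus1 + m_n*n_n_kplus1),
// equals Kconv + deltaKE, i.e. the old z-direction relative KE less what was absorbed into ionization.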
f64 new_vz_diff;
if (safe_argument >= 0.0) {
new_vz_diff = sqrt(safe_argument);
} else {
new_vz_diff = 0.0; // Guard to stop it crashing; the theory of why the sqrt argument should be positive has not been revisited.
};
// I guess it can be -ve.
// Choose new_vz_diff to have same sign as w0z = diff_k:
if (w0z < 0.0) new_vz_diff = -new_vz_diff;
f64 delta_vez = m_n*n_n_kplus1*(-w0z + new_vz_diff) /
(m_n*n_n_kplus1 + m_e*n_kplus1);
f64 delta_vnz = -m_e*n_kplus1*delta_vez / (m_n*n_n_kplus1);
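// delta_vez and delta_vnz are the unique pair that (i) conserves z-momentum,
// m_e*n_kplus1*delta_vez = -m_n*n_n_kplus1*delta_vnz, and (ii) sets the new relative velocity:
// w0z + delta_vez - delta_vnz = new_vz_diff.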
if (TEST_IONIZE) printf("deltaKE %1.10E v.vez %1.10E w0z %1.9E new_vz_diff %1.9E delta_vez %1.9E\n",
deltaKE, v.vez, w0z, new_vz_diff, delta_vez);
// Check: w0 = vez-vnz - tick
// should change to scalar.
MAR_neut.z += AreaMajor*n_n_kplus1*delta_vnz / h_use;
MAR_elec.z += AreaMajor*n_kplus1*delta_vez / h_use;
f64_vec3 ve_kplus1, vi_kplus1, vn_kplus1;
f64_vec3 v_use;
v_use.x = v.vxy.x;
v_use.y = v.vxy.y;
v_use.z = (m_e*v.vez + m_i*v.viz) / (m_e + m_i);
// Store alongside: v_k+1 so that we can follow the anticipated change in energy,
// to create energy balance:
ve_kplus1.x = v.vxy.x*(n_k / n_kplus1);
ve_kplus1.y = v.vxy.y*(n_k / n_kplus1);
ve_kplus1.z = v.vez*(n_k / n_kplus1) + delta_vez; // we need to store v, we could also store nv if we wanted.
vi_kplus1.x = v.vxy.x*(n_k / n_kplus1);
vi_kplus1.y = v.vxy.y*(n_k / n_kplus1);
vi_kplus1.z = v.viz*(n_k / n_kplus1);
vn_kplus1 = v_n*(n_n_k / n_n_kplus1); // Check in accel routine to be sure this will ever actually happen
vn_kplus1.z += delta_vnz;
// Does it happen automatically or do we need to include the n_k+1/n_k effect in MAR_ ????
// It does NOT work automatically !!!
// We have to include the effect here ---- stupid us, don't know why doing it this way.
// Where used: v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
// and n_use is the target n_kplus1.
// MAR_neut += AreaMajor*n_n_kplus1*(v_n*(n_n_k/ n_n_kplus1) - v_n) / h_use;
MAR_neut += AreaMajor*(v_n*(n_n_k - n_n_kplus1)) / h_use;
MAR_ion += AreaMajor*(Make3(v.vxy,v.viz)*(n_k - n_kplus1)) / h_use;
MAR_elec += AreaMajor*(Make3(v.vxy,v.vez)*(n_k - n_kplus1)) / h_use;
// diluting v..
// 2. Add the effect of xfers on momenta:
// Let's think about this clearly:
// v_k+1 = (1/n_k+1) (n_k v_k + delta_ionize v_n - delta_rec v_use)
// = v_k + h * MAR / (n_k+1 * Area);
// MAR(h/area) = (v_k+1 - v_k)(n_k+1)
// = ( n_k v_k + delta_ionize v_n - delta_rec v_use - v_k n_k+1)
// We split the change into local minor cells, hopefully correctly.
vn_kplus1 -= (Delta_ionise*v_n - Delta_rec*v_use) / n_n_kplus1;
// n_k+1 v_k+1 = n_k v_k + Delta_n*v_use => v_k+1 = (n_k/n_k+1) v_k + (Delta_n/n_k+1) v_use
vi_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
ve_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
MAR_neut += -dNdt_ionise*v_n + dNdt_recombine*v_use; // area*delta_n/h * v_use
MAR_ion += dNdt_ionise*v_n - dNdt_recombine*v_use;
MAR_elec += dNdt_ionise*v_n - dNdt_recombine*v_use;
if (TEST_IONIZE) printf("__ ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E delta_vez %1.9E\n",
ve_kplus1.x, ve_kplus1.y, ve_kplus1.z, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
vn_kplus1.x, vn_kplus1.y, vn_kplus1.z, delta_vez);
// . Ionization cooling & recombination heating
//f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// ourrates.NeTe +=
// dNdt_recombine*2.0*13.6*kB / 3.0
// - (2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv))*dNdt_ionise;
// We can drop this: it will be accounted for by the final energy balance.
// 3. Add to nT for x-fers due to species converting
// ourrates.NiTi += 0.5*dNdt_ionise*T_k.Tn;
// ourrates.NeTe += 0.5*dNdt_ionise*T_k.Tn;
// ourrates.NnTn -= dNdt_ionise*T_k.Tn; // no longer need this, it is incorporated below
// f64 nTe_kplus1 = T_k.Te*(n_k)+0.5*Delta_ionise*T_k.Tn;
// f64 nTi_kplus1 = T_k.Ti*(n_k)+0.5*Delta_ionise*T_k.Tn;
// f64 n_nTn_kplus1 = T_k.Tn*(n_n_k)-Delta_ionise*T_k.Tn;
// without any change to NeTe it would stay just the same as at timeslice k.
// 4. Energy balance through Te:
// Maybe we should rather be seeking OVERALL energy balance where KE_result is from n_k+1, v_k+1
// and we ensure that we have lost the right amount of energy overall.
// That is the better way:
// f64 KE_result = 0.5*(m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + m_i*n_kplus1*vi_kplus1.dot(vi_kplus1)
// + m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1));
// if (TEST_IONIZE) printf("n_kplus1 %1.12E ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E\n",
// n_kplus1, ve_kplus1.x, ve_kplus1.y, ve_kplus1.z, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
// vn_kplus1.x, vn_kplus1.y, vn_kplus1.z);
f64 Energy_density_k_n = 0.5*m_n*n_n_k*v_n.dot(v_n) + 1.5*n_n_k*T_k.Tn;
f64 Energy_density_k_i = 0.5*m_i*n_k*(v.vxy.dot(v.vxy) + v.viz*v.viz) + 1.5*n_k*T_k.Ti;
f64 Energy_density_k_e = 0.5*m_e*n_k*(v.vxy.dot(v.vxy) + v.vez*v.vez) + 1.5*n_k*T_k.Te;
// The energy density given the change in velocity but zero change in heat:
f64 Energy_density_kplus1_e = 0.5*m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + 1.5*T_k.Te*n_k;
// without any change to NeTe it would stay just the same as at timeslice k.
f64 Energy_density_kplus1_i = 0.5*m_i*n_kplus1*vi_kplus1.dot(vi_kplus1) + 1.5*T_k.Ti*n_k;
f64 Energy_density_kplus1_n = 0.5*m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1) + 1.5*T_k.Tn*n_n_k;
f64 Energy_density_target_n = Energy_density_k_n + 0.5*m_n*Delta_rec*v_use.dot(v_use) + 1.5*Delta_rec*(T_k.Ti + T_k.Te)
- 0.5*m_n*Delta_ionise*v_n.dot(v_n) - 1.5*Delta_ionise*T_k.Tn;
f64 Energy_density_target_i = Energy_density_k_i + 0.5*m_i*Delta_ionise*v_n.dot(v_n) + 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn
- 0.5*m_i*Delta_rec*v_use.dot(v_use) - 1.5*Delta_rec*T_k.Ti;
f64 Energy_density_target_e = Energy_density_k_e + 0.5*m_e*Delta_ionise*v_n.dot(v_n) + 1.5*(m_e / m_n)*Delta_ionise*T_k.Tn
- 0.5*m_e*Delta_rec*v_use.dot(v_use) - 1.5*Delta_rec*T_k.Te
- 13.6*kB*(Delta_ionise - Delta_rec);
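// Bookkeeping for the three targets: each species keeps its own energy density from step k, gains the
// kinetic and thermal energy carried in by particles converting into it (ionising neutrals bring v_n and
// their share of Tn, split between ion and electron in the ratio m_i/m_n : m_e/m_n; recombining pairs
// bring v_use and Te + Ti to the neutrals), loses what is carried out by particles converting away, and
// the electron target additionally pays the ionisation energy 13.6*kB per net ionisation.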
// f64 Energy_density_target = Energy_k - 13.6*kB*(Delta_ionise - Delta_rec);
// Additional_heat = (KE_k + deltaKE) - KE_result; // usually positive
// 1*1+3*3 > 2*2 + 2*2 so KE is generally decreasing by friction; KE_result < KE_k+deltaKE
// KE_result + Added_heat + existing heat = desired total energy = KE_k + heat_k + deltaKE
// 1.5 nT += Frictional_heating
// NTe += (2/3) Area Frictional_heating
if (TEST_IONIZE) printf("AreaMajor %1.9E h_use %1.9E 0.6666 13.6 kB %1.9E \n"
"Energy_density_kplus1_i %1.12E target_i %1.12E k_i %1.12E \n"
"vi_k %1.10E %1.10E %1.10E kplus1 %1.10E %1.10E %1.10E\n"
"0.5*m_i*Delta_ionise*v_n.dot(v_n) %1.9E 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn %1.9E\n"
"-0.5*m_i*Delta_rec*v_use.dot(v_use) %1.9E -1.5*Delta_rec*T_k.Ti %1.9E\n",
AreaMajor, h_use, 0.666667*13.6*kB,
Energy_density_kplus1_i, Energy_density_target_i, Energy_density_k_i,
v.vxy.x, v.vxy.y, v.viz, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
0.5*m_i*Delta_ionise*v_n.dot(v_n), 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn,
-0.5*m_i*Delta_rec*v_use.dot(v_use) , -1.5*Delta_rec*T_k.Ti
);
if (TEST_IONIZE) printf("ourrates.NiTi before: %1.11E \n", ourrates.NiTi);
ourrates.NeTe += 2.0*AreaMajor*
(Energy_density_target_e - Energy_density_kplus1_e) / (3.0*h_use);
ourrates.NiTi += 2.0*AreaMajor*
(Energy_density_target_i - Energy_density_kplus1_i) / (3.0*h_use);
ourrates.NnTn += 2.0*AreaMajor*
(Energy_density_target_n - Energy_density_kplus1_n) / (3.0*h_use);
if (TEST_IONIZE) printf("ourrates.NiTi after: %1.11E Area*n_kplus1 %1.9E \n\n", ourrates.NiTi,
AreaMajor*n_kplus1);
//
// if ((Energy_density_target_e - Energy_density_kplus1_e) / (n_k*AreaMajor) > 1.0e-8)
// {
// printf("Vertex %d vez(k+1) %1.9E vezk %1.9E delta_vez %1.9E\n"
// "iVertex %d n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E h*NeTe %1.9E \n"
// "Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
// "Delta_ionise %1.9E rec %1.9E deltaKE %1.9E deltavez %1.9E\n"
// "Predicted Te %1.12E Theta %1.12E \n"
// "Energy_k %1.12E w0z %1.9E energy_kplus1 %1.12E energy_target %1.12E \n"
// "KEk %1.12E Heat_k %1.12E \n",
// iVertex,
// ve_kplus1.z, v.vez, delta_vez,
// iVertex, n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
// T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
// Delta_ionise, Delta_rec, deltaKE, delta_vez,
// (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use) / (n_k*AreaMajor),
// Theta, Energy_k, w0z, Energy_density_kplus1_e, Energy_density_target_e,
// 0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n)),
// 1.5*(n_k*T_k.Te + n_k*T_k.Ti + n_n_k*T_k.Tn));
// printf("iVertex %d "
// "ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E\n"
// "v_n %1.9E %1.9E %1.9E n_n_k %1.9E n_n_kplus1 %1.9E n_k %1.9E n_kplus1 %1.9E \n"
// "Theta %1.10E Kconv %1.10E deltaKE %1.10E ppnK %1.10E full_loss %1.10E\n",
// iVertex,
// ve_kplus1.x, ve_kplus1.y, ve_kplus1.z,
// vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
// vn_kplus1.x, vn_kplus1.y, vn_kplus1.z,
// v_n.x, v_n.y, v_n.z, n_n_k, n_n_kplus1, n_k, n_kplus1,
// Theta, Kconv, deltaKE,
// (2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv)),Delta_ionise*13.6*kB
// );
// }
// DEBUG:
if (TEST_IONIZE) printf("iVertex %d n_k %1.9E n_n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E \n h*NeTe %1.9E "
"Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
"Delta_ionise %1.9E rec %1.9E \n",
iVertex, n_k, n_n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
Delta_ionise, Delta_rec
);
// DEBUG:
if (TEST_IONIZE) //n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use < 0.0)
printf("%d Predicted Te %1.9E \n", iVertex, (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use) / (n_k*AreaMajor));
// Try to get rid of 77
}
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
memcpy(p_MAR_neut + iVertex, &MAR_neut, sizeof(f64_vec3));
memcpy(p_MAR_ion + iVertex, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex, &MAR_elec, sizeof(f64_vec3));
};
}
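// ---------------------------------------------------------------------------------------------------
// Illustrative sketch only, nothing calls it: the compressive-heating factor used in the advance-n-T
// kernels below, factor = (3 + h*div_v)/(3 + 2*h*div_v), is a rational approximation to
// (1 + h*div_v)^(-1/3). It is multiplied in once before the interspecies solve and once again when
// forming T_dest, so the total compression applied to T is roughly (1 + h*div_v)^(-2/3), consistent with
// dT/dt = -(2/3) T div v. The helper name is hypothetical.
__host__ __device__ inline double CompressionFactorApprox_sketch(double h_div_v)
{
return (3.0 + h_div_v) / (3.0 + 2.0*h_div_v); // ~ pow(1.0 + h_div_v, -1.0/3.0) for small h*div_v
}
// ---------------------------------------------------------------------------------------------------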
__global__ void kernelAdvanceDensityAndTemperature(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
// Think we see the mistake here: are these to be major or minor values?
// Major, right? Check code:
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_div_v_neut,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_Integrated_div_v_overall,
f64 * __restrict__ p_AreaMajor, // hmm
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
if (TEST)
printf("Advance_nT %d : nsrc %1.12E nn %1.12E *AreaMajor %1.12E %1.12E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.10E \n"
"h*additionNiTi %1.12E for e %1.12E for n %1.12E \n"
"AdditionNT.e %1.10E h_use %1.10E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x],
newdata.NiTi, newdata.NeTe, newdata.NnTn,
AdditionNT.NeTe, h_use);
}
// So at this vertex, near the insulator, NiTi that comes in is NaN. Is that advection or diffusion?
// Have to go to bed tonight...
{
nvals n_dest;
f64 Div_v_overall_integrated = p_Integrated_div_v_overall[iVertex];
n_dest.n = newdata.N / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // Do have to worry whether advection steps are too frequent.
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // What could do differently: know ROC area as well as mass flux through walls
p_n_major_dest[iVertex] = n_dest;
// if (iVertex == CHOSEN) printf("GPU %d n_dest.n_n %1.14E Area_used %1.14E \n\n", iVertex, n_dest.n_n,
// (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated));
}
// roughly right ; maybe there are improvements.
// --------------------------------------------------------------------------------------------
// Simple way of doing area ratio for exponential growth of T:
// (1/(1+h div v)) -- v outward grows the area so must be + here.
// Compressive heating:
// USE 1 iteration of Halley's method for cube root:
// cu_root Q =~~= x0(x0^3+2Q)/(2x0^3+Q) .. for us x0 = 1, Q is (1+eps)^-2
// Thus (1+2(1+eps)^-2)/(2+(1+eps)^-2)
// Multiply through by (1+eps)^2:
// ((1+eps)^2+2)/(1+2*(1+eps)^2) .. well of course it is
// eps = h div v
// Way to get reasonable answer without re-doing equations:
// Take power -1/3 and multiply once before interspecies and once after.
f64 factor, factor_neut; // used again at end
{
f64 Div_v = p_div_v[iVertex];
f64 Div_v_n = p_div_v_neut[iVertex];
factor = (3.0 + h_use * Div_v) /
(3.0 + 2.0* h_use * Div_v);
factor_neut = (3.0 + h_use * Div_v_n) /
(3.0 + 2.0*h_use * Div_v_n);
}
// gives (1+ h div v)^(-1/3), roughly
// Alternate version:
// factor = pow(pVertex->AreaCell / pVertDest->AreaCell, 2.0 / 3.0);
// pVertDest->Ion.heat = pVertex->Ion.heat*factor;
// but the actual law is with 5/3
// Comp htg dT/dt = -2/3 T div v_fluid
// factor (1/(1+h div v))^(2/3) --> that's same
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn*factor_neut;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti*factor;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te*factor;
//
if (TEST) {
printf("\nAdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdate.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n",
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te);
}
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei; // optimize after
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
//
// if (iVertex == CHOSEN) {
// printf("nu_en_MT components GPU : %1.8E %1.8E %1.8E \n",
// s_en_MT, n_src_or_use[threadIdx.x].n_n, electron_thermal);
// f64 T = T_use.Te*one_over_kB;
// int j;
// printf("T = %1.10E\n", T);
// for (j = 0; j < 10; j++)
// printf("%d : cross_T_vals_d %1.10E cross_s_vals_MT %1.10E \n",
// j, cross_T_vals_d[j], cross_s_vals_MT_ni_d[j]);
// int i = 1;
// if (T > cross_T_vals_d[5]) {
// if (T > cross_T_vals_d[7]) {
// if (T > cross_T_vals_d[8])
// {
// i = 9; // top of interval
// }
// else {
// i = 8;
// };
// }
// else {
// if (T > cross_T_vals_d[6]) {
// i = 7;
// }
// else {
// i = 6;
// };
// };
// }
// else {
// if (T > cross_T_vals_d[3]) {
// if (T > cross_T_vals_d[4]) {
// i = 5;
// }
// else {
// i = 4;
// };
// }
// else {
// if (T > cross_T_vals_d[2]) {
// i = 3;
// }
// else {
// if (T > cross_T_vals_d[1]) {
// i = 2;
// }
// else {
// i = 1;
// };
// };
// };
// };
// // T lies between i-1,i
// printf("i = %d\n\n", i);
// }
nu_ei = nu_eiBarconst * kB_to_3halves*n_src_or_use[threadIdx.x].n*lnLambda /
(T_use.Te*sqrt_Te);
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
if (TEST)
printf("%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E \n areamajor %1.8E\n"
"nu_in %1.10E nu_en %1.8E \n"
"Frictional htg (NT+=): n i e %1.10E %1.10E %1.10E\n",
iVertex, v_n.z, vie.viz, vie.vez, AreaMajor[threadIdx.x],
nu_in_MT, nu_en_MT,
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz))),
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz))),
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz))
);
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_ei;
LHS.xx = 1.0 - h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -h_use * (M_in * nu_in_MT);
LHS.xz = -h_use *(M_en * nu_en_MT);
LHS.yx = -h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -h_use * M_ei * nu_ei;
LHS.zx = -h_use * M_en * nu_ne_MT;
LHS.zy = -h_use * M_ei * nu_ie;
LHS.zz = 1.0 - h_use * (-M_en * nu_en_MT - M_ei * nu_ei);
// some indices appear reversed because NT not T.
if (TEST) {
printf("%d LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n",
iVertex, LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
printf("GPU %d : NnTn %1.14E NeTe %1.14E nu_en_MT %1.12E \n", iVertex, newdata.NnTn, newdata.NeTe, nu_en_MT);
}
LHS.Inverse(inverted);
}
f64_vec3 RHS;
f64 nu_ie = nu_ei;
RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
+ h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
+ h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
+ h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
T3 T_dest;
T_dest.Tn = newdata.NnTn* factor_neut / newdata.Nn;
T_dest.Ti = newdata.NiTi* factor / newdata.N;
T_dest.Te = newdata.NeTe* factor / newdata.N;
if (TEST) {
printf("\ninverted %d | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n"
"NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n",
iVertex,
inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z,
newdata.NnTn, newdata.NiTi, newdata.NeTe, T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
if (T_dest.Te != T_dest.Te) {
printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
"n %1.10E Area %1.10E hd/dtNT %1.10E\n",
iVertex, factor, newdata.N, info.flag,
n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], h_use * NTadditionrates[iVertex].N);
}
p_T_major_dest[iVertex] = T_dest;
}
else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
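// ---------------------------------------------------------------------------------------------------
// Illustrative sketch only, nothing calls it: the interspecies temperature-soak step in the kernel above
// solves a 3x3 backward-Euler system LHS * NT_new = RHS for the heat contents (NnTn, NiTi, NeTe): the
// diagonal carries 1 + h*(loss frequencies) and the off-diagonal carries -h*(gain frequencies), each
// weighted by the mass-ratio factors M_en, M_in, M_ei. The sketch rebuilds the same LHS from given
// frequencies and inverts it by the adjugate in plain doubles, in place of f64_tens3::Inverse.
// The free function and its parameter names are hypothetical.
__host__ __device__ inline void SolveHeatExchange3x3_sketch(
double h, double nu_ne, double nu_en, double nu_ni, double nu_in, double nu_ei,
double Men, double Min, double Mei, // the mass-ratio factors M_en, M_in, M_ei
const double RHS[3], // built as in the kernel from the pre-soak NnTn, NiTi, NeTe
double NT_out[3]) // NnTn, NiTi, NeTe after the implicit exchange
{
double nu_ie = nu_ei;
double L[3][3] = {
{ 1.0 + h*(Men*nu_ne + Min*nu_ni), -h*Min*nu_in, -h*Men*nu_en },
{ -h*Min*nu_ni, 1.0 + h*(Min*nu_in + Mei*nu_ie), -h*Mei*nu_ei },
{ -h*Men*nu_ne, -h*Mei*nu_ie, 1.0 + h*(Men*nu_en + Mei*nu_ei) } };
double det = L[0][0]*(L[1][1]*L[2][2] - L[1][2]*L[2][1])
- L[0][1]*(L[1][0]*L[2][2] - L[1][2]*L[2][0])
+ L[0][2]*(L[1][0]*L[2][1] - L[1][1]*L[2][0]);
double inv[3][3] = {
{ (L[1][1]*L[2][2] - L[1][2]*L[2][1]), -(L[0][1]*L[2][2] - L[0][2]*L[2][1]), (L[0][1]*L[1][2] - L[0][2]*L[1][1]) },
{ -(L[1][0]*L[2][2] - L[1][2]*L[2][0]), (L[0][0]*L[2][2] - L[0][2]*L[2][0]), -(L[0][0]*L[1][2] - L[0][2]*L[1][0]) },
{ (L[1][0]*L[2][1] - L[1][1]*L[2][0]), -(L[0][0]*L[2][1] - L[0][1]*L[2][0]), (L[0][0]*L[1][1] - L[0][1]*L[1][0]) } };
for (int i = 0; i < 3; i++)
NT_out[i] = (inv[i][0]*RHS[0] + inv[i][1]*RHS[1] + inv[i][2]*RHS[2]) / det;
}
// ---------------------------------------------------------------------------------------------------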
__global__ void kernelAdvanceDensityAndTemperature_nosoak_etc(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_div_v_neut,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_Integrated_div_v_overall,
f64 * __restrict__ p_AreaMajor, // hmm
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
if (TEST1)
printf("Advance_nT NOSOAK %d : nsrc %1.12E nn %1.12E *AreaMajor %1.12E %1.12E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E h_use %1.10E AdditionNT N Nn %1.10E %1.10E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], h_use,
AdditionNT.N, AdditionNT.Nn);
nvals n_dest;
f64 Div_v_overall_integrated = p_Integrated_div_v_overall[iVertex];
n_dest.n = newdata.N / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // Do have to worry whether advection steps are too frequent.
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // What could do differently: know ROC area as well as mass flux through walls
p_n_major_dest[iVertex] = n_dest;
if (iVertex == VERTCHOSEN) printf("\n %d n_dest.n_n %1.14E Area_used %1.14E Div_v_overall_integ %1.13E\n\n",
iVertex, n_dest.n_n,
(AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated),
Div_v_overall_integrated);
}
// roughly right ; maybe there are improvements.
// --------------------------------------------------------------------------------------------
// Simple way of doing area ratio for exponential growth of T:
// (1/(1+h div v)) -- v outward grows the area so must be + here.
// Compressive heating:
// USE 1 iteration of Halley's method for cube root:
// cu_root Q =~~= x0(x0^3+2Q)/(2x0^3+Q) .. for us x0 = 1, Q is (1+eps)^-2
// Thus (1+2(1+eps)^-2)/(2+(1+eps)^-2)
// Multiply through by (1+eps)^2:
// ((1+eps)^2+2)/(1+2*(1+eps)^2) .. well of course it is
// eps = h div v
// Way to get reasonable answer without re-doing equations:
// Take power -1/3 and multiply once before interspecies and once after.
f64 factor, factor_neut; // used again at end
{
f64 Div_v = p_div_v[iVertex];
f64 Div_v_n = p_div_v_neut[iVertex];
factor = (3.0 + h_use * Div_v) /
(3.0 + 2.0* h_use * Div_v);
factor_neut = (3.0 + h_use * Div_v_n) /
(3.0 + 2.0*h_use * Div_v_n);
}
// gives (1+ h div v)^(-1/3), roughly
// Alternate version:
// factor = pow(pVertex->AreaCell / pVertDest->AreaCell, 2.0 / 3.0);
// pVertDest->Ion.heat = pVertex->Ion.heat*factor;
// but the actual law is with 5/3
// Comp htg dT/dt = -2/3 T div v_fluid
// factor (1/(1+h div v))^(2/3) --> that's same
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn*factor_neut;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti*factor;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te*factor;
//
if (TEST) {
printf("\nAdvance_nT NOSOAK %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi (the new Ni Ti) %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn (the new Nn Tn) %1.12E Tn_k %1.12E \n",
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
}
T3 T_dest;
T_dest.Tn = newdata.NnTn* factor_neut / newdata.Nn;
T_dest.Ti = newdata.NiTi* factor / newdata.N;
T_dest.Te = newdata.NeTe* factor / newdata.N;
if (TEST) {
printf("\nAdvance_nT NOSOAK %d : newdata.N %1.9E T_dest.Ti %1.10E Nn %1.9E T_dest.Tn %1.10E\n",
iVertex, newdata.N, T_dest.Ti, newdata.Nn, T_dest.Tn);
}
if (T_dest.Te != T_dest.Te) {
printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
"n %1.10E Area %1.10E hd/dtNT %1.10E\n",
iVertex, factor, newdata.N, info.flag,
n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], h_use * NTadditionrates[iVertex].N);
}
p_T_major_dest[iVertex] = T_dest;
}
else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
__global__ void kernelAdvanceDensityAndTemperature_noadvectioncompression(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest,
f64_vec3 * __restrict__ p_B_major
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
nvals n_dest;
n_dest.n = newdata.N / (AreaMajor[threadIdx.x]);
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x]);
p_n_major_dest[iVertex] = n_dest;
if (TEST)
printf("Bdvance_nT %d : nsrc %1.13E nn %1.13E *AreaMajor %1.13E %1.13E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E n_n_k+1 %1.14E \n"
"h*additionNT.N %1.14E h*additionNT.Nn %1.14E h %1.14E h*addNT.NeTe %1.14E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], n_dest.n_n,
h_use*AdditionNT.N, h_use*AdditionNT.Nn, h_use,
h_use*AdditionNT.NeTe);
}
f64 factor = 1.0, factor_neut = 1.0; // used again at end; no compression in this kernel so initialise to 1.0 (also keeps the TEST_T printf well defined)
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te;
//
if (TEST_T) {
printf("\nCdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn %1.12E Tn_k %1.12E \n"
,
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi,T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei_effective; // optimize after
f64 nu_eiBar;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
s_en_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n);
s_in_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n); // returns factor 1.0 if n+nn > 1.0e14.
// Send heat into neutrals if there's not much stuff here total.
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY,n_src_or_use[threadIdx.x].n)*lnLambda / (T_use.Te*sqrt_Te);
f64 nu_eHeart = 1.87*nu_eiBar + n_src_or_use[threadIdx.x].n_n*s_en_visc*electron_thermal;
f64_vec3 omega = p_B_major[iVertex] * qovermc;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
// assign to the variable declared above this brace; re-declaring it here would shadow it and
// leave the nu_ei_effective used after the brace uninitialised
nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))));
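// Gathered into one line (just a restatement of the expression above):
//    nu_ei_effective = nu_eiBar * [ 1 - 0.9 * nu_eiBar * (nu_eHeart^2 + omega_z^2) / (nu_eHeart * (nu_eHeart^2 + |omega|^2)) ]
// i.e. the raw e-i collision frequency with a magnetisation/collisionality-dependent reduction.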
if (TEST) printf("%d nu_eiBar: %1.10E n %1.10E lnLambda %1.10E T_use %1.10E \n"
"nu_eHeart %1.10E omega %1.8E %1.8E %1.8E qovermc %1.8E nu_eiBar/nu_eHeart %1.8E \n"
"nunuomegaomegafac %1.9E ratio %1.9E 1.0-0.9* = %1.9E nu_ei_effective %1.9E\n",
iVertex, nu_eiBar, n_src_or_use[threadIdx.x].n, lnLambda, T_use.Te,
nu_eHeart, omega.x, omega.y, omega.z, qovermc,
nu_eiBar / nu_eHeart,
(nu_eHeart*nu_eHeart + omega.z*omega.z) / (nu_eHeart*nu_eHeart + omega.dot(omega)),
nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))),
(1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega)))),
nu_ei_effective
);
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
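// In formula form (a reading of the code below, not a change to it), with N = n*AreaMajor the
// frictional-heating increments applied this step are:
//    d(N_e T_e) += h * N * (2/3) * [ nu_en_MT * m_en * |v_n - v_e|^2 + nu_ei_effective * m_ei * (v_ez - v_iz)^2 ]
//    d(N_i T_i) += h * N * (2/3) * nu_in_MT * M_in * m_n * |v_n - v_i|^2
//    d(N_n T_n) += h * N_n * (2/3) * nu_ni_MT * M_in * m_i * |v_n - v_i|^2
// where v_e and v_i share vxy in-plane and differ only in their z components (vez, viz).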
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x]*n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
// I see that I did resistive heating for nu_ei but did something much more complicated in the acceleration routine.
// That isn't quite right then.
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n_n * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
if (TEST) {
printf(
"%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E Frictional htg (NT+=):e %1.10E\n"
"elec e-n z htg: %1.10E i-e z htg: %1.10E \n",
iVertex, v_n.z, vie.viz, vie.vez,
h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz)),
h_use*AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_en_MT*m_en*(v_n.z - vie.vez)*(v_n.z - vie.vez),
h_use*AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz)
);
};
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
// 6th Nov 2019 : add 2* so that it all goes here.
LHS.xx = 1.0 - 2.0*h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -2.0*h_use * (M_in * nu_in_MT);
LHS.xz = -2.0*h_use *(M_en * nu_en_MT);
LHS.yx = -2.0*h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - 2.0*h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -2.0*h_use * M_ei * nu_eiBar;
LHS.zx = -2.0*h_use * M_en * nu_ne_MT;
LHS.zy = -2.0*h_use * M_ei * nu_ie;
LHS.zz = 1.0 - 2.0*h_use * (-M_en * nu_en_MT - M_ei * nu_eiBar);
if (TEST) {
printf("%d LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n"
,
iVertex, LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
}
LHS.Inverse(inverted);
}
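// Sketch of what this solves (a reading of the code, nothing new): with C the 3x3 exchange matrix
// whose entries are the +/- M*nu products used above, LHS = I - 2*h*C, so inverting it gives the
// backward-Euler update for inter-species temperature exchange:
//    (I - 2*h*C) * NT_k+1 = NT_rhs   =>   NT_k+1 = inverted * RHS
// which is exactly the NT = inverted * RHS product computed below.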
f64_vec3 RHS;
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
//RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
// + h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
//RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
// + h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
//RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
// + h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
RHS.x = newdata.NnTn;
RHS.y = newdata.NiTi;
RHS.z = newdata.NeTe;
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
T3 T_dest;
T_dest.Tn = newdata.NnTn / newdata.Nn;
T_dest.Ti = newdata.NiTi / newdata.N;
T_dest.Te = newdata.NeTe/ newdata.N;
if (TEST) {
printf("\ninverted %d | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n"
" NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n\n"
,
iVertex, inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z,
newdata.NnTn, newdata.NiTi, newdata.NeTe,
T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
// if (T_dest.Te != T_dest.Te) {
// printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
// "n %1.10E Area %1.10E hd/dtNT %1.10E\n",
// iVertex, factor, newdata.N, info.flag,
// n_src_or_use[threadIdx.x].n,AreaMajor[threadIdx.x] , h_use * NTadditionrates[iVertex].N);
// }
p_T_major_dest[iVertex] = T_dest;
} else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
__global__ void kernelAdvanceDensityAndTemperature_noadvectioncompression_Copy(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_Tgraph_resistive,
f64 * __restrict__ p_Tgraph_other,
f64 * __restrict__ p_Tgraph_total,
f64 * __restrict__ p_Tgraph_dNT
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // index of the vertex
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
nvals n_dest;
n_dest.n = newdata.N / (AreaMajor[threadIdx.x]);
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x]);
p_n_major_dest[iVertex] = n_dest;
if (TEST)
printf("Bdvance_nT %d : nsrc %1.13E nn %1.13E *AreaMajor %1.13E %1.13E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E n_n_k+1 %1.14E \n"
"h*additionNT.N %1.14E h*additionNT.Nn %1.14E h %1.14E \n"
, VERTCHOSEN,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], n_dest.n_n,
h_use*AdditionNT.N, h_use*AdditionNT.Nn, h_use);
}
f64 factor = 1.0, factor_neut = 1.0; // as in the kernel above: no compression here, so fix these at 1 rather than let the debug printfs read uninitialised values
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te;
//
if (TEST) {
printf("\nAdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn %1.12E Tn_k %1.12E \n"
,
VERTCHOSEN, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei_effective; // optimize after
f64 nu_eiBar;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
s_en_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n);
s_in_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n); // returns factor 1.0 if n+nn > 1.0e14.
// Send heat into neutrals if there's not much stuff here total.
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY, n_src_or_use[threadIdx.x].n)*lnLambda / (T_use.Te*sqrt_Te);
if (TEST) {
printf("nu_eiBar %1.12E n %1.12E lnLambda %1.10E \n\n", nu_eiBar, n_src_or_use[threadIdx.x].n, lnLambda);
real Te_eV = T_use.Te*one_over_kB;
real Te_eV2 = Te_eV*Te_eV;
real Te_eV3 = Te_eV*Te_eV2;
if (n_src_or_use[threadIdx.x].n*Te_eV3 > 0.0) {
f64 lnLambda1 = 23.0 - 0.5*log(n_src_or_use[threadIdx.x].n / Te_eV3);
f64 lnLambda2 = 24.0 - 0.5*log(n_src_or_use[threadIdx.x].n / Te_eV2);
// smooth between the two:
f64 factorxx = 2.0*fabs(Te_eV - 10.0)*(Te_eV - 10.0) / (1.0 + 4.0*(Te_eV - 10.0)*(Te_eV - 10.0));
lnLambda = lnLambda1*(0.5 - factorxx) + lnLambda2*(0.5 + factorxx);
printf("lnLambda1 2 %1.14E %1.14E lnLambda %1.14E Te_eV %1.12E factorxx %1.12E \n", lnLambda1, lnLambda2, lnLambda, Te_eV, factorxx);
// floor at 2 just in case, but it should not get near:
f64 lnLambda_sq = lnLambda*lnLambda;
factorxx = 1.0 + 0.5*lnLambda + 0.25*lnLambda_sq + 0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0 / factorxx;
printf("lnLambda %1.14E after floor at 2 ... \n", lnLambda);
if (lnLambda < 2.0) lnLambda = 2.0;
};
};
f64 nu_eHeart = 1.87*nu_eiBar + n_src_or_use[threadIdx.x].n_n*s_en_visc*electron_thermal;
f64_vec3 omega = p_B_major[iVertex] * qovermc;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
// as above: assign to the variable declared outside this brace instead of shadowing it
nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))));
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
p_Tgraph_resistive[iVertex] = TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz);
// I see that I did resistive heating for nu_ei but did something much more complicated in the acceleration routine.
// That isn't quite right then.
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n_n * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
//if (TEST)
// printf("%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E \n areamajor %1.8E\n"
// "nu_in %1.10E nu_en %1.8E \n"
// "Frictional htg (NT+=): n i e %1.10E %1.10E %1.10E\n",
// VERTCHOSEN, v_n.z, vie.viz, vie.vez, AreaMajor[threadIdx.x],
// nu_in_MT, nu_en_MT,
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.viz)*(v_n.z - vie.viz))),
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.viz)*(v_n.z - vie.viz))),
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.vez)*(v_n.z - vie.vez))
// + AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz))
// );
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
// 6th Nov 2019 : add 2* so that it all goes here.
LHS.xx = 1.0 - 2.0*h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -2.0*h_use * (M_in * nu_in_MT);
LHS.xz = -2.0*h_use *(M_en * nu_en_MT);
LHS.yx = -2.0*h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - 2.0*h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -2.0*h_use * M_ei * nu_eiBar;
LHS.zx = -2.0*h_use * M_en * nu_ne_MT;
LHS.zy = -2.0*h_use * M_ei * nu_ie;
LHS.zz = 1.0 - 2.0*h_use * (-M_en * nu_en_MT - M_ei * nu_eiBar);
// some indices appear reversed because NT not T.
if (TEST) printf("LHS.zz %1.10E h_use %1.10E M_en %1.10E nu_en_MT %1.10E nu_eiBar %1.10E\n",
LHS.zz, h_use, M_en, nu_en_MT, nu_eiBar);
if (TEST) {
printf("LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n",
LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
printf("GPU %d : NnTn %1.14E NeTe %1.14E \n", VERTCHOSEN, newdata.NnTn, newdata.NeTe);
printf("GPU nu_en_MT %1.14E\n", nu_en_MT);
}
LHS.Inverse(inverted);
}
f64_vec3 RHS;
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
//RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
// + h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
//RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
// + h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
//RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
// + h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
RHS.x = newdata.NnTn;
RHS.y = newdata.NiTi;
RHS.z = newdata.NeTe;
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
T3 T_dest;
T_dest.Tn = newdata.NnTn / newdata.Nn;
T_dest.Ti = newdata.NiTi / newdata.N;
T_dest.Te = newdata.NeTe / newdata.N;
if (TEST) {
printf("\ninverted | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n",
inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z);
printf("GPU %d : NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n", VERTCHOSEN, newdata.NnTn, newdata.NiTi, newdata.NeTe,
T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
if (TEST) printf("%d : T_dest %1.8E %1.8E %1.8E \n"
"newdata .NeTe %1.10E .N %1.10E factor %1.10E\n\n",
iVertex, T_dest.Tn, T_dest.Ti, T_dest.Te,
newdata.NeTe, newdata.N, factor
);
p_T_major_dest[iVertex] = T_dest;
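// Diagnostic outputs for graphing, as read off the expressions themselves: p_Tgraph_resistive (set
// further up) is the per-particle frictional/resistive heating rate of Te; p_Tgraph_other below is the
// e-n and e-i equilibration rate 2*M*nu*(T_other - T_e); p_Tgraph_total is the net (Te_k+1 - Te_k)/h;
// p_Tgraph_dNT is that same rate scaled by n = N/AreaMajor.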
p_Tgraph_other[iVertex] = 2.0 * M_en * nu_en_MT*(T_dest.Tn - T_dest.Te)
+ 2.0 * M_ei * nu_eiBar*(T_dest.Ti - T_dest.Te);
p_Tgraph_total[iVertex] = (T_dest.Te - T_src.Te) / h_use;
p_Tgraph_dNT[iVertex] = (T_dest.Te - T_src.Te)* newdata.N / (AreaMajor[threadIdx.x] * h_use);
} else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
/*
__global__ void kernelCalculateUpwindDensity_tris(
structural * __restrict__ p_info_minor,
ShardModel * __restrict__ p_n_shard_n_major,
ShardModel * __restrict__ p_n_shard_major,
v4 * __restrict__ p_vie_minor,
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_overall_v_minor,
LONG3 * __restrict__ p_tricornerindex,
LONG3 * __restrict__ p_trineighindex,
LONG3 * __restrict__ p_which_iTri_number_am_I,
CHAR4 * __restrict__ p_szPBCneigh_tris,
nvals * __restrict__ p_n_upwind_minor, // result
T3 * __restrict__ p_T_minor,
T3 * __restrict__ p_T_upwind_minor // result
)
{
// The idea is to take the upwind n on each side of each
// major edge through this tri, weighted by |v.edge_normal|
// to produce an average.
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 doubles/vertex
__shared__ f64_12 shared_shards[threadsPerTileMajor]; // + 12
// 15 doubles right there. Max 21 for 288 vertices. 16 is okay.
// Might as well stick 1 more double in there if we get worried about registers.
// #############################################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%###############
// We need a reverse index: this triangle carry 3 indices to know who it is to its corners.
long const iTri = blockDim.x*blockIdx.x + threadIdx.x;
structural const info = p_info_minor[iTri];
nvals result;
T3 upwindT;
shared_pos[threadIdx.x] = info.pos;
long const StartMajor = blockIdx.x*threadsPerTileMajor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long const StartMinor = blockIdx.x*threadsPerTileMinor;
long const EndMinor = StartMinor + threadsPerTileMinor;
if (threadIdx.x < threadsPerTileMajor)
{
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[threadsPerTileMajor*blockIdx.x + threadIdx.x].n), MAXNEIGH * sizeof(f64));
// efficiency vs memcpy? We only need 12 here, not the centre.
}
__syncthreads();
f64 n0, n1, n2;
T3 T0, T1, T2;
f64_vec2 edge_normal0, edge_normal1, edge_normal2;
LONG3 tricornerindex, trineighindex;
LONG3 who_am_I;
f64_vec2 v_overall;
char szPBC_triminor[6];
CHAR4 szPBC_neighs;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)
|| (info.flag == CROSSING_CATH))
{
// Several things we need to collect:
// . v in this triangle and mesh v at this triangle centre.
// . edge_normal going each way
// . n that applies from each corner
// How to get n that applies from each corner:
tricornerindex = p_tricornerindex[iTri];
who_am_I = p_which_iTri_number_am_I[iTri];
szPBC_neighs = p_szPBCneigh_tris[iTri];
// Wasteful:
T0 = p_T_minor[tricornerindex.i1 + BEGINNING_OF_CENTRAL];
T1 = p_T_minor[tricornerindex.i2 + BEGINNING_OF_CENTRAL];
T2 = p_T_minor[tricornerindex.i3 + BEGINNING_OF_CENTRAL];
if ((tricornerindex.i1 >= StartMajor) && (tricornerindex.i1 < EndMajor))
{
n0 = shared_shards[tricornerindex.i1 - StartMajor].n[who_am_I.i1]; // whoa, be careful with data type / array
}
else {
n0 = p_n_shard_major[tricornerindex.i1].n[who_am_I.i1];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i2 >= StartMajor) && (tricornerindex.i2 < EndMajor))
{
n1 = shared_shards[tricornerindex.i2 - StartMajor].n[who_am_I.i2];
}
else {
n1 = p_n_shard_major[tricornerindex.i2].n[who_am_I.i2];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i3 >= StartMajor) && (tricornerindex.i3 < EndMajor))
{
n2 = shared_shards[tricornerindex.i3 - StartMajor].n[who_am_I.i3];
}
else {
n2 = p_n_shard_major[tricornerindex.i3].n[who_am_I.i3];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
v_overall = p_overall_v_minor[iTri];
f64_vec2 relv = p_vie_minor[iTri].vxy - v_overall;
// So this relies on the assumption that n = 0 outside of domain.
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
int number_within = (n0 > 0.0) ? 1 : 0 + (n1 > 0.0) ? 1 : 0 + (n2 > 0.0) ? 1 : 0;
if (number_within == 1) {
result.n = n0 + n1 + n2;
upwindT.Te = T0.Te + T1.Te + T2.Te;
upwindT.Tn = T0.Tn + T1.Tn + T2.Tn;
upwindT.Ti = T0.Ti + T1.Ti + T2.Ti;
}
else {
// quick way not upwind:
result.n = 0.5*(n0 + n1 + n2);
upwindT.Te = 0.5*(T0.Te + T1.Te + T2.Te);
upwindT.Tn = 0.5*(T0.Tn + T1.Tn + T2.Tn);
upwindT.Ti = 0.5*(T0.Ti + T1.Ti + T2.Ti); // watch out for heat evacuating CROSSING_INS tris.
}
//if (iTri == 23400) printf("\n23400 was an insulator tri, T012 %1.8E %1.8E %1.8E upwind %1.8E\n"
// "indexcorner %d %d %d\n\n",
// T0.Te,T1.Te,T2.Te,upwindT.Te,
// tricornerindex.i1, tricornerindex.i1, tricornerindex.i3);
if (info.flag == CROSSING_CATH) {
// set n = 0 if position is within cathode rod:
if (!TestDomainPos(info.pos)) {
result.n = 0.0;
}
}
} else {
trineighindex = p_trineighindex[iTri];
// if (iTri == CHOSEN) printf("%d GPU: n0 %1.14E n1 %1.14E n2 %1.14E \n"
// "relv GPU %1.14E %1.14E \n",
// CHOSEN, n0, n1, n2, relv.x, relv.y);
f64_vec2 nearby_pos;
if ((trineighindex.i1 >= StartMinor) && (trineighindex.i1 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i1 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i1].pos;
}
if (szPBC_neighs.per0 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per0 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
// Slightly puzzled why we don't just take difference of 2 corners of our triangle.
// Why dealing with tri positions instead of vertex positions? Because tri positions
// are the corners of the major cell.
edge_normal0.x = nearby_pos.y - info.pos.y;
edge_normal0.y = info.pos.x - nearby_pos.x;
// CAREFUL : which side is which???
// tri centre 2 is on same side of origin as corner 1 -- I think
// We don't know if the corners have been numbered anticlockwise?
// Could arrange it though.
// So 1 is anticlockwise for edge 0.
f64 numerator = 0.0;
f64 dot1, dot2;
f64 dot0 = relv.dot(edge_normal0);
if ((trineighindex.i2 >= StartMinor) && (trineighindex.i2 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i2 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i2].pos;
}
if (szPBC_neighs.per1 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per1 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
edge_normal1.x = nearby_pos.y - info.pos.y;
edge_normal1.y = info.pos.x - nearby_pos.x;
dot1 = relv.dot(edge_normal1);
if ((trineighindex.i3 >= StartMinor) && (trineighindex.i3 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i3 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i3].pos;
}
if (szPBC_neighs.per2 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per2 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
edge_normal2.x = nearby_pos.y - info.pos.y;
edge_normal2.y = info.pos.x - nearby_pos.x;
dot2 = relv.dot(edge_normal2);
bool b0, b1, b2; // is this n012 legit?
if (dot0 > 0.0) { b2 = 1; }
else { b1 = 1; };
if (dot1 > 0.0) { b0 = 1; }
else { b2 = 1; };
if (dot2 > 0.0) { b1 = 1; }
else { b0 = 1; };
//Usually now only one of b012 is false.
if (b0 == 0) {
if (b1 == 0) {
result.n = n2; // how idk
memcpy(&upwindT, &T2, sizeof(T3));
} else {
if (b2 == 0) {
result.n = n1;
memcpy(&upwindT, &T1, sizeof(T3));
} else {
result.n = min(n1, n2);
upwindT.Te = min(T1.Te, T2.Te);
upwindT.Ti = min(T1.Ti, T2.Ti);
}
}
} else {
if ((b1 == 0) && (b2 == 0)) {
result.n = n0;
memcpy(&upwindT, &T0, sizeof(T3));
} else {
if (b1 == 0) {
result.n = min(n0, n2);
memcpy(&upwindT, &T2, sizeof(T3));
} else {
if (b2 == 0)
{
result.n = min(n0, n1);
upwindT.Te = min(T1.Te, T0.Te);
upwindT.Ti = min(T1.Ti, T0.Ti);
} else {
result.n = min(min(n0, n1), n2);
upwindT.Te = min(T0.Te, min(T1.Te, T2.Te));
upwindT.Ti = min(T0.Ti, min(T1.Ti, T2.Ti));
}
}
}
}
// if (iTri == 23435) printf("CALC UPWIND n\n"
// "tricornerindex %d %d %d\n"
// "n0 n1 n2 %1.12E %1.12E %1.12E\n"
// "relv %1.9E %1.9E \n"
// "edge_nml %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E \n"
// "dot %1.9E %1.9E %1.9E\n"
// "b0 b1 b2 %d %d %d \n"
// "result.n %1.9E\n\n",
// tricornerindex.i1, tricornerindex.i2, tricornerindex.i3,
// n0, n1, n2,
// relv.x, relv.y,
// edge_normal0.x, edge_normal0.y, edge_normal1.x, edge_normal1.y, edge_normal2.x, edge_normal2.y,
// dot0, dot1, dot2,
// (b0 ? 1 : 0), (b1 ? 1 : 0), (b2 ? 1 : 0),
// result.n);
//
//
// if (iTri == 23400) printf("\n23400 was a domain tri, T012 %1.8E %1.8E %1.8E upwind %1.8E\n"
// "relv %1.8E %1.8E b012 %d %d %d \n\n",
// T0.Te, T1.Te, T2.Te, upwindT.Te,
// relv.x, relv.y, (int)b0, (int)b1, (int)b2);
// Alternative way: try using squared weights of upwind n for v.dot(edgenormal).
// This old, doesn't work when JxB force empties out near ins:
// Argument against fabs in favour of squared weights?
};
// Think carefully / debug how it goes for CROSSING_INS.
} else {
result.n = 0.0;
memset(&upwindT, 0, sizeof(T3));
};
// Now same for upwind neutral density:
// In order to use syncthreads we had to come out of the branching.
if (threadIdx.x < threadsPerTileMajor)
{
memcpy(&(shared_shards[threadIdx.x].n),
&(p_n_shard_n_major[threadsPerTileMajor*blockIdx.x + threadIdx.x].n),
sizeof(f64)*MAXNEIGH);
// efficiency vs memcpy? We only need 12 here, not the centre.
}
__syncthreads();
// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)
|| (info.flag == CROSSING_CATH))
{
if ((tricornerindex.i1 >= StartMajor) && (tricornerindex.i1 < EndMajor))
{
n0 = shared_shards[tricornerindex.i1 - StartMajor].n[who_am_I.i1];
}
else {
n0 = p_n_shard_n_major[tricornerindex.i1].n[who_am_I.i1];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i2 >= StartMajor) && (tricornerindex.i2 < EndMajor))
{
n1 = shared_shards[tricornerindex.i2 - StartMajor].n[who_am_I.i2];
} else {
n1 = p_n_shard_n_major[tricornerindex.i2].n[who_am_I.i2];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i3 >= StartMajor) && (tricornerindex.i3 < EndMajor))
{
n2 = shared_shards[tricornerindex.i3 - StartMajor].n[who_am_I.i3];
} else {
n2 = p_n_shard_n_major[tricornerindex.i3].n[who_am_I.i3];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
f64_vec2 relv = p_v_n_minor[iTri].xypart() - v_overall;
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
int number_within = ((n0 > 0.0) ? 1 : 0) + ((n1 > 0.0) ? 1 : 0) + ((n2 > 0.0) ? 1 : 0);
if (number_within == 1) {
result.n_n = n0 + n1 + n2;
upwindT.Tn = T0.Tn + T1.Tn + T2.Tn;
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : INS-1 nn012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1, n2, result.n_n);
} else {
// quick way not upwind:
result.n_n = 0.5*(n0 + n1 + n2);
upwindT.Tn = 0.5*(T0.Tn + T1.Tn + T2.Tn);
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : INS-2 nn012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1, n2, result.n_n);
};
if (info.flag == CROSSING_CATH) {
// set n = 0 if position is within cathode rod:
if (!TestDomainPos(info.pos)) {
result.n_n = 0.0;
}
}
} else {
f64 numerator = 0.0;
f64 dot1, dot2;
f64 dot0 = relv.dot(edge_normal0);
dot1 = relv.dot(edge_normal1);
dot2 = relv.dot(edge_normal2);
bool b0, b1, b2; // is this n012 legit?
if (dot0 > 0.0) { b2 = 1; }
else { b1 = 1; };
if (dot1 > 0.0) { b0 = 1; }
else { b2 = 1; };
if (dot2 > 0.0) { b1 = 1; }
else { b0 = 1; };
//Usually now only one of b012 is false.
if (b0 == 0) {
if (b1 == 0) {
result.n_n = n2; // how idk
upwindT.Tn = T2.Tn;
}
else {
if (b2 == 0) { result.n = n1; }
else {
result.n_n = min(n1, n2);
upwindT.Tn = min(T1.Tn, T2.Tn);
}
}
}
else {
if ((b1 == 0) && (b2 == 0)) {
result.n_n = n0;
upwindT.Tn = T0.Tn;
}
else {
if (b1 == 0) {
result.n_n = min(n0, n2);
upwindT.Tn = min(T2.Tn, T0.Tn);
}
else {
if (b2 == 0)
{
result.n_n = min(n0, n1);
upwindT.Tn = min(T1.Tn, T0.Tn);
} else {
result.n_n = min(min(n0, n1), n2);
upwindT.Tn = min(min(T1.Tn, T0.Tn), T2.Tn);
}
}
}
}
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : DOMAIN n012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1,n2, result.n_n);
// Look carefully at what happens for CROSSING_INS.
// relv should be horizontal, hence it doesn't give a really low density? CHECK IT IN PRACTICE.
};
} else {
result.n_n = 0.0;
upwindT.Tn = 0.0;
};
p_n_upwind_minor[iTri] = result;
p_T_upwind_minor[iTri] = upwindT;
}*/
/*
__global__ void kernelAccumulateAdvectiveMassHeatRate(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major,
nvals * __restrict__ p_n_upwind_minor,
v4 * __restrict__ p_vie_minor,
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
//T3 * __restrict__ p_T_minor, // may or may not overlap source: don't we only use from tris? so not overlap
T3 * __restrict__ p_T_upwind_minor,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_div_v_n,
f64 * __restrict__ p_Integrated_div_v_overall
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // only reused what, 3 times? 2*2 per major thread
__shared__ nvals shared_n_upwind[threadsPerTileMinor];
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // 2*2 per major thread
__shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ T3 shared_T[threadsPerTileMinor]; // 2*2 ... rightly or wrongly.
// Do neutral after? Necessitates doing all the random loads again.
// Is that worse than loading for each point at the time, a 2-vector v_overall?
// About 6 bus journeys per external point. About 1/4 as many external as internal?
// ^ only 6 because doing ion&neutral together. Changing to do sep could make sense.
// 2* (2+2+2+2+3) = 22
// Max viable threads at 26: 236
// Max viable threads at 24: 256
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
memcpy(&(shared_n_upwind[2 * threadIdx.x]),
p_n_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(nvals) * 2);
v4 vie[2];
memcpy(&vie, p_vie_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(v4) * 2);
shared_vxy[2 * threadIdx.x] = vie[0].vxy;
shared_vxy[2 * threadIdx.x + 1] = vie[1].vxy;
f64_vec3 v_n[2];
memcpy(v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
shared_v_n[2 * threadIdx.x] = v_n[0].xypart();
shared_v_n[2 * threadIdx.x + 1] = v_n[1].xypart();
memcpy(&(shared_T[2 * threadIdx.x]), p_T_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(T3) * 2);
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if (info.flag == DOMAIN_VERTEX) {
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 Te_next, Te_prev, Ti_next, Ti_prev, Tn_next, Tn_prev;
short inext, i = 0;
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
nvals nvls = shared_n_upwind[iTri - StartMinor];
n_prev = nvls.n;
nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
v_n_prev = shared_v_n[iTri - StartMinor];
Te_prev = shared_T[iTri - StartMinor].Te;
Ti_prev = shared_T[iTri - StartMinor].Ti;
Tn_prev = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
nvals n_upwind = p_n_upwind_minor[iTri];
n_prev = n_upwind.n;
nn_prev = n_upwind.n_n;
vxy_prev = p_vie_minor[iTri].vxy;
v_n_prev = p_v_n_minor[iTri].xypart();
T3 Tuse = p_T_upwind_minor[iTri];
Te_prev = Tuse.Te;
Ti_prev = Tuse.Ti;
Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
nvals totalmassflux_out;
memset(&totalmassflux_out, 0, sizeof(nvals));
T3 totalheatflux_out;
memset(&totalheatflux_out, 0, sizeof(T3));
f64 Integrated_div_v = 0.0;
f64 Integrated_div_v_n = 0.0;
f64 Integrated_div_v_overall = 0.0;
f64 AreaMajor = 0.0;
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0;
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
nvals nvls = shared_n_upwind[iTri - StartMinor];
n_next = nvls.n;
nn_next = nvls.n_n;
vxy_next = shared_vxy[iTri - StartMinor];
v_n_next = shared_v_n[iTri - StartMinor];
Te_next = shared_T[iTri - StartMinor].Te;
Ti_next = shared_T[iTri - StartMinor].Ti;
Tn_next = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
nvals n_upwind = p_n_upwind_minor[iTri];
n_next = n_upwind.n;
nn_next = n_upwind.n_n;
vxy_next = p_vie_minor[iTri].vxy;
v_n_next = p_v_n_minor[iTri].xypart();
T3 Tuse = p_T_upwind_minor[iTri];
Te_next = Tuse.Te;
Ti_next = Tuse.Ti;
Tn_next = Tuse.Tn;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
v_n_next = Clockwise_d*v_n_next;
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
v_n_next = Anticlockwise_d*v_n_next;
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v_overall += 0.5*(v_overall_prev + v_overall_next).dot(edge_normal); // Average outward velocity of edge...
// The area CAN be changing because of other vertices dragging on it.
// However we can ignore it as n,T should be locally constant near the rod
// anyway.
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
totalmassflux_out.n += 0.5*(n_prev*(vxy_prev - v_overall_prev)
+ n_next*(vxy_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Ti += 0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
+ n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Te += 0.5*(n_prev*Te_prev*(vxy_prev - v_overall_prev)
+ n_next*Te_next*(vxy_next - v_overall_next)).dot(edge_normal);
};
if ((nn_prev != 0.0) && (nn_next != 0.0)) {
Integrated_div_v_n += 0.5*(v_n_prev + v_n_next).dot(edge_normal);
totalmassflux_out.n_n += 0.5*(nn_prev*(v_n_prev - v_overall_prev)
+ nn_next*(v_n_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Tn += 0.5*(nn_prev*Tn_prev*(v_n_prev - v_overall_prev)
+ nn_next*Tn_next*(v_n_next - v_overall_next)).dot(edge_normal);
};
if (TEST1) printf("advect GPU %d : "
"i %d iTri %d heatfluxout_contrib e %1.14E \n"
"nprev %1.14E nnext %1.14E\n"
"Te_prev next %1.14E %1.14E \nrel vxy %1.14E %1.14E ; %1.14E %1.14E\n"
"edge_normal %1.14E %1.14E \n"
"-------------------------\n",
iVertex, i, iTri,
0.5*(n_prev*Te_prev*(vxy_prev - v_overall_prev)
+ n_next*Te_next*(vxy_next - v_overall_next)).dot(edge_normal),
n_prev, n_next,
Te_prev, Te_next, (vxy_prev - v_overall_prev).x, (vxy_prev - v_overall_prev).y,
(vxy_next - v_overall_next).x, (vxy_next - v_overall_next).y,
edge_normal.x, edge_normal.y);
if (TESTADVECT) printf("AccumulateAdvectiveMassHeatRate iVertex %d : inext %d iTri %d \n"
"NTiflux %1.9E cumu %1.9E n_prev %1.9E n_next %1.9E vxyprev %1.7E %1.7E\n"
"vxy_prev.edgenml %1.9E v_overall_prev. %1.9E vxy_next. %1.9E v_overall_next. %1.9E\n"
"Ti_prev %1.9E Ti_next %1.9E prev contrib %1.9E nex cntrib %1.9E\n"
"v_overall_next %1.9E %1.9E | \n"
"------------------------------------------------\n",
iVertex, i, iTri,
0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
+ n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal),
totalheatflux_out.Ti, n_prev, n_next, vxy_prev.x, vxy_prev.y,
vxy_prev.dot(edge_normal),
v_overall_prev.dot(edge_normal),
vxy_next.dot(edge_normal),
v_overall_next.dot(edge_normal),
Ti_prev, Ti_next,
0.5*n_prev*Ti_prev*(vxy_prev - v_overall_prev).dot(edge_normal),
0.5*n_next*Ti_next*(vxy_next - v_overall_next).dot(edge_normal),
v_overall_next.x, v_overall_next.y
);
if (TESTADVECTNEUT) printf("AccumulateAdvectiveMassHeatRate iVertex %d : inext %d iTri %d \n"
"NnTnflux %1.9E cumu %1.9E nn_prev %1.9E nn_next %1.9E vxyprev %1.7E %1.7E\n"
"vxy_prev.edgenml %1.9E v_overall_prev. %1.9E vxy_next. %1.9E v_overall_next. %1.9E\n"
"Tn_prev %1.9E Tn_next %1.9E prev contrib %1.9E nex cntrib %1.9E\n"
"v_overall_next %1.9E %1.9E\n"
"------------------------------------------------\n",
iVertex, i, iTri,
0.5*(nn_prev*Tn_prev*(v_n_prev - v_overall_prev)
+ nn_next*Tn_next*(v_n_next - v_overall_next)).dot(edge_normal),
totalheatflux_out.Tn, nn_prev, nn_next, v_n_prev.x, v_n_prev.y,
v_n_prev.dot(edge_normal),
v_overall_prev.dot(edge_normal),
v_n_next.dot(edge_normal),
v_overall_next.dot(edge_normal),
Tn_prev, Tn_next,
0.5*nn_prev*Tn_prev*(v_n_prev - v_overall_prev).dot(edge_normal),
0.5*nn_next*Tn_next*(v_n_next - v_overall_next).dot(edge_normal),
v_overall_next.x, v_overall_next.y
);
// if (TEST) printf("advect GPU %d : "
// "i %d iTri %d heatfluxout_contrib %1.14E \n"
// "nprev %1.14E nnext %1.14E\n"
// "Ti_prev next %1.14E %1.14E \nrel vxy %1.14E %1.14E ; %1.14E %1.14E\n"
// "edge_normal %1.14E %1.14E \n"
// "-------------------------\n",
// iVertex, i, iTri,
// 0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
// + n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal),
// n_prev, n_next,
// Ti_prev, Ti_next, (vxy_prev-v_overall_prev).x, (vxy_prev - v_overall_prev).y,
// (vxy_next - v_overall_next).x, (vxy_next - v_overall_next).y,
// edge_normal.x, edge_normal.y);
if (TEST1) printf("advect GPU %d : "
"i %d iTri %d heatfluxout_contrib e %1.14E \n"
"nprev %1.14E nnext %1.14E\n"
"Te_prev next %1.14E %1.14E \nrel vxy %1.14E %1.14E ; %1.14E %1.14E\n"
"edge_normal %1.14E %1.14E \n"
"-------------------------\n",
iVertex, i, iTri,
0.5*(n_prev*Te_prev*(vxy_prev - v_overall_prev)
+ n_next*Te_next*(vxy_next - v_overall_next)).dot(edge_normal) ,
n_prev, n_next,
Te_prev, Te_next, (vxy_prev - v_overall_prev).x, (vxy_prev - v_overall_prev).y,
(vxy_next - v_overall_next).x, (vxy_next - v_overall_next).y,
edge_normal.x, edge_normal.y);
//
endpt0 = endpt1;
n_prev = n_next;
nn_prev = nn_next;
vxy_prev = vxy_next;
v_n_prev = v_n_next;
v_overall_prev = v_overall_next;
Ti_prev = Ti_next;
Te_prev = Te_next;
Tn_prev = Tn_next;
};
NTrates NTplus;
NTplus.N = -totalmassflux_out.n;
NTplus.Nn = -totalmassflux_out.n_n;
NTplus.NeTe = -totalheatflux_out.Te;
NTplus.NiTi = -totalheatflux_out.Ti;
NTplus.NnTn = -totalheatflux_out.Tn;
//
// if (TEST) printf("\n%d : NTplus.NiTi %1.10E NTplus.N %1.10E Tsrc.i %1.10E nsrc.n %1.10E\n"
// "NTplus.NiTi/NTplus.N (avg temp of those coming/going) %1.10E\n"
// "NTplus.NiTi/N (ROC Ti) %1.10E\n"
// "NTplus.NiTi/NiTi (elasticity of T?) %1.10E \n"
// "NTplus.N/N (elasticity of N) %1.10E \n\n",
// CHOSEN, NTplus.NiTi, NTplus.N,
// Tsrc.Ti, nsrc.n,
// NTplus.NiTi/NTplus.N,
// NTplus.NiTi/(AreaMajor*nsrc.n),
// NTplus.NiTi/(AreaMajor*nsrc.n*Tsrc.Ti),
// NTplus.N/(AreaMajor*nsrc.n)
// );
memcpy(p_NTadditionrates + iVertex, &NTplus, sizeof(NTrates));
// What we need now:
// * Cope with non-domain vertex
p_div_v[iVertex] = Integrated_div_v / AreaMajor;
p_div_v_n[iVertex] = Integrated_div_v_n / AreaMajor;
p_Integrated_div_v_overall[iVertex] = Integrated_div_v_overall;
// if (iVertex == CHOSEN) printf(
// "Chosen: %d Integrated_div_v_n %1.9E p_div_v_n %1.9E \n",
// iVertex, Integrated_div_v_n, p_div_v_n[iVertex]);
// 3 divisions -- could speed up by creating 1.0/AreaMajor. Except it's bus time anyway.
} else {
p_div_v[iVertex] = 0.0;
p_div_v_n[iVertex] = 0.0;
p_Integrated_div_v_overall[iVertex] = 0.0;
};
}*/
__global__ void kernelAccumulateAdvectiveMassHeatRateNew(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
long * __restrict__ p_izNeigh_vert,
short * __restrict__ p_who_am_I_to_my_neighbours,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major, // use T vertex itself to infer what T to use.
v4 * __restrict__ p_vie_minor,
// f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
//T3 * __restrict__ p_T_minor, // may or may not overlap source: don't we only use from tris? so not overlap
// ShardModel * __restrict__ p_n_shard_n_major,
ShardModel * __restrict__ p_n_shard_major,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v,
// f64 * __restrict__ p_div_v_n, // write ion & electron routine only first; re-do as neutral.
f64 * __restrict__ p_Integrated_div_v_overall,
NTrates * __restrict__ p_store_flux
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 -- assume we still use this.
// only reused what, 3 times? 2*2 per major thread
// do we
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // +2*2 per major thread
// __shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // +4
// could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ T3 shared_T[threadsPerTileMajor]; // +3 ... = 15 total. Can run 128 threads.
// Should we just pre-average this on tris to make life easier for ourselves? No, because we also need T_opp.
__shared__ f64_12 shared_shards[threadsPerTileMajor];
// probably stick with loading in tri positions if we can.
// Probably can't manage to run this routine with 256 threads at a time. Can't fit 8 doubles / thread to shared.
// 24 doubles/thread to get 256 threads so we still need to chuck some out!
// 48K shared is default.
// scrap v_n, do neutral in sequence.
/////////////////////////////////////////////////////////////////////////////
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
v4 vie[2];
memcpy(&vie, p_vie_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(v4) * 2);
shared_vxy[2 * threadIdx.x] = vie[0].vxy;
shared_vxy[2 * threadIdx.x + 1] = vie[1].vxy;
//f64_vec3 v_n[2];
//memcpy(v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
//shared_v_n[2 * threadIdx.x] = v_n[0].xypart();
//shared_v_n[2 * threadIdx.x + 1] = v_n[1].xypart();
// memcpy(&(shared_T[2 * threadIdx.x]), p_T_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(T3) * 2);
shared_T[threadIdx.x] = p_T_src_major[iVertex];
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMajor = StartMajor + threadsPerTileMajor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
// Now that we have T from vertices, we'll need to define it on INS tri -- gulp. Just use avg of here and opp.
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
// We do not use for Outermost.
// Trouble is that sometimes a wall is moving and we want outermost to send US the mass.
// Solution: never shift vertices that lie outside a certain radius.
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[iVertex].n), MAXNEIGH * sizeof(f64));
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
// is this USED?
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 Te_next, Te_prev, Ti_next, Ti_prev, Tn_next, Tn_prev;
short inext, i = 0;
// Initial scenario: use triangle 0 & triangle 1. These face at neighbour 1. prev neigh = 0.
// Notice that for OUTERMOST we can make no such assumption --- the opposite holds.
long iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 0];
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
Ti_prev = shared_T[iNeigh - StartMajor].Ti;
Te_prev = shared_T[iNeigh - StartMajor].Te;
} else {
T3 Tload = p_T_src_major[iNeigh];
Ti_prev = Tload.Ti;
Te_prev = Tload.Te;
};
if (Ti_prev == 0.0) Ti_prev = shared_T[threadIdx.x].Ti;
if (Te_prev == 0.0) Te_prev = shared_T[threadIdx.x].Te;
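// (Assumption: a neighbour T of exactly zero marks a non-domain vertex, so we substitute our own T
//  rather than let a zero drag down the edge averages formed below.)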
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
//nvals nvls = shared_n_upwind[iTri - StartMinor];
//n_prev = nvls.n;
//nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
//v_n_prev = shared_v_n[iTri - StartMinor];
//Te_prev = shared_T[iTri - StartMinor].Te;
//Ti_prev = shared_T[iTri - StartMinor].Ti;
//Tn_prev = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
//nvals n_upwind = p_n_upwind_minor[iTri];
//n_prev = n_upwind.n;
//nn_prev = n_upwind.n_n;
vxy_prev = p_vie_minor[iTri].vxy;
//v_n_prev = p_v_n_minor[iTri].xypart();
//T3 Tuse = p_T_upwind_minor[iTri];
//Te_prev = Tuse.Te;
//Ti_prev = Tuse.Ti;
//Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
//v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
//v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
// We're going to need the position of our vertex?
nvals totalmassflux_out;
memset(&totalmassflux_out, 0, sizeof(nvals));
T3 totalheatflux_out;
memset(&totalheatflux_out, 0, sizeof(T3)); // we're only going to use ion and electron
f64 Integrated_div_v = 0.0;
// f64 Integrated_div_v_n = 0.0;
f64 Integrated_div_v_overall = 0.0;
f64 AreaMajor = 0.0;
f64 Ti_opp, Te_opp;
iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 1]; // neigh 0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
Ti_opp = shared_T[iNeigh - StartMajor].Ti;
Te_opp = shared_T[iNeigh - StartMajor].Te;
}
else {
T3 Tload = p_T_src_major[iNeigh];
Ti_opp = Tload.Ti;
Te_opp = Tload.Te;
};
if (Ti_opp == 0.0) Ti_opp = shared_T[threadIdx.x].Ti;
if (Te_opp == 0.0) Te_opp = shared_T[threadIdx.x].Te;
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0; // i,inext are the triangle indices
// Let's assume inext is the index of iNeigh but we should spit out lists to check this.
short inext2 = inext + 1; if (inext2 == tri_len) inext2 = 0;
long iNeighNext = p_izNeigh_vert[iVertex*MAXNEIGH + inext2]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeighNext >= StartMajor) && (iNeighNext < EndMajor)) {
Ti_next = shared_T[iNeighNext - StartMajor].Ti;
Te_next = shared_T[iNeighNext - StartMajor].Te;
}
else {
T3 Tload = p_T_src_major[iNeighNext]; // it's actually use not src.
Ti_next = Tload.Ti;
Te_next = Tload.Te;
};
if (Ti_next == 0.0) Ti_next = shared_T[threadIdx.x].Ti;
if (Te_next == 0.0) Te_next = shared_T[threadIdx.x].Te;
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
vxy_next = shared_vxy[iTri - StartMinor];
// We are going to need a separate attempt to get at T from vertices, to use in Simpson.
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
vxy_next = p_vie_minor[iTri].vxy;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
//v_n_next = Clockwise_d*v_n_next;  // v_n is not used in this kernel (neutrals done separately), so keep commented out like the v_n_prev lines above
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
//v_n_next = Anticlockwise_d*v_n_next;
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
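// Area by the divergence theorem: edge_normal.x = (y1 - y0), so each edge contributes
// 0.5*(x0 + x1)*(y1 - y0) and the sum over the closed polygon gives the major-cell area.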
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
n_prev = shared_shards[threadIdx.x].n[i];
n_next = shared_shards[threadIdx.x].n[inext];
// Totally smash up the runtime:
char neighflag = p_info_minor[iNeigh + BEGINNING_OF_CENTRAL].flag;
if ((neighflag == DOMAIN_VERTEX) ||
((info.flag == DOMAIN_VERTEX) && (neighflag == OUTERMOST)))
{
// Note: changes of cell area looking into ins / cath are not valid changes of area.
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v_overall += 0.5*(v_overall_prev + v_overall_next).dot(edge_normal); // Average outward velocity of edge...
// The area CAN be changing because of other vertices dragging on it.
// However we can ignore it as n,T should be locally constant near the rod anyway.
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
f64 prev_relv = (vxy_prev - v_overall_prev).dot(edge_normal);
f64 next_relv = (vxy_next - v_overall_next).dot(edge_normal);
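// prev_relv / next_relv: normal component of the flow velocity relative to the mesh motion
// (v_overall appears to be the velocity of the moving mesh/edge), scaled by the edge length
// through edge_normal. The upwind test below (prev_relv + next_relv > 0) means the net
// relative flow leaves this cell, i.e. we are the donor cell and compute the outgoing flux
// here; otherwise the neighbour computes it and we collect it later from p_store_flux.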
if (iVertex == VERTCHOSEN) printf("%d : i = %d %d , contrib to integ div v overall %1.9E v_overall prev %1.9E %1.9E\n"
"prev next dot normal %1.9E %1.9E\n",
iVertex, i, iNeigh, 0.5*(v_overall_prev + v_overall_next).dot(edge_normal),
v_overall_prev.x, v_overall_prev.y, v_overall_prev.dot(edge_normal),
v_overall_next.dot(edge_normal));
// Insulator: If 1 or 2 of the vertices comes out at T=0 then what?
// Fill in with our own value. Upwind.
// But absolutely ensure we are not looking out of domain at vertex! And we are throwing away anything that flowed into OUTERMOST. Too bad about that, leave it.
// Without loading info for the vertex we look at, we do not know if it's out of domain. So we have to rely on vr=0 at insulator.
// upwind? :
if (prev_relv + next_relv > 0.0) {
// For now:
f64 Ti_prevavg = THIRD*(shared_T[threadIdx.x].Ti + Ti_prev + Ti_opp);
f64 Ti_nextavg = THIRD*(shared_T[threadIdx.x].Ti + Ti_next + Ti_opp);
f64 Te_prevavg = THIRD*(shared_T[threadIdx.x].Te + Te_prev + Te_opp);
f64 Te_nextavg = THIRD*(shared_T[threadIdx.x].Te + Te_next + Te_opp);
// LIMIT flux T to 2*ours.
if (Ti_prevavg > 2.0*shared_T[threadIdx.x].Ti) Ti_prevavg = 2.0*shared_T[threadIdx.x].Ti;
if (Ti_nextavg > 2.0*shared_T[threadIdx.x].Ti) Ti_nextavg = 2.0*shared_T[threadIdx.x].Ti;
if (Te_prevavg > 2.0*shared_T[threadIdx.x].Te) Te_prevavg = 2.0*shared_T[threadIdx.x].Te;
if (Te_nextavg > 2.0*shared_T[threadIdx.x].Te) Te_nextavg = 2.0*shared_T[threadIdx.x].Te;
f64 Ti_avg = 0.5*(shared_T[threadIdx.x].Ti + Ti_opp);
f64 Te_avg = 0.5*(shared_T[threadIdx.x].Te + Te_opp);
if (Ti_avg > 2.0*shared_T[threadIdx.x].Ti) Ti_avg = 2.0*shared_T[threadIdx.x].Ti;
if (Te_avg > 2.0*shared_T[threadIdx.x].Te) Te_avg = 2.0*shared_T[threadIdx.x].Te;
NTrates NTaddn;
memset(&NTaddn, 0, sizeof(NTrates));
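// Flux quadrature used below (interpretation): n and T are taken at the two triangle
// endpoints of the edge and at the midpoint (the midpoint values taken as the average of
// the endpoint values), combined with Simpson-like weights 1/4, 1/2, 1/4:
//   N_flux ~ 0.25*n_prev*prev_relv + 0.25*n_next*next_relv
//            + 0.5*[0.5*(n_prev+n_next)]*[0.5*(prev_relv+next_relv)]
// which is where the 0.125 coefficient on (n_prev+n_next)*(prev_relv+next_relv) comes from.
// The NiTi / NeTe lines do the same with the (limited) edge-average temperatures folded in.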
NTaddn.N = 0.25*n_prev*prev_relv + 0.25*n_next*next_relv
+ 0.5*0.25*(n_prev + n_next)*(prev_relv + next_relv);
NTaddn.NiTi = 0.25*n_prev*Ti_prevavg*prev_relv + 0.25*n_next*Ti_nextavg*next_relv
+ 0.125*(n_prev + n_next)*Ti_avg*(prev_relv + next_relv);
NTaddn.NeTe = 0.25*n_prev*Te_prevavg*prev_relv + 0.25*n_next*Te_nextavg*next_relv
+ 0.125*(n_prev + n_next)*Te_avg*(prev_relv + next_relv);
totalmassflux_out.n += NTaddn.N;
totalheatflux_out.Ti += NTaddn.NiTi;
totalheatflux_out.Te += NTaddn.NeTe;
// Maybe there's a speedup we can use.
// Now save to downwind cell:
short who_am_I = p_who_am_I_to_my_neighbours[iVertex*MAXNEIGH + inext];
memcpy(&(p_store_flux[iNeigh*MAXNEIGH + who_am_I]), &(NTaddn), sizeof(NTrates));
// NOTE WE DID N*O*T ADD A MINUS.
if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_HEAT_FLAG)) {
printf("iVertex %d NTaddn.NiTi %1.9E n_prev %1.9E Ti_prevavg %1.9E prev_relv %1.9E \n"
"n_next %1.9E Ti_nextavg %1.9E next_relv %1.9E Ti_avg %1.9E \n"
"totalheatflux_out.Ti %1.9E i %d iNeigh %d who_am_I %d\n"
"Ti_ours %1.8E Ti_opp %1.8E \n"
"---------------------------------------------------\n",
iVertex, NTaddn.NiTi, n_prev, Ti_prevavg, prev_relv, n_next, Ti_nextavg, next_relv,
Ti_avg, totalheatflux_out.Ti, i, iNeigh, who_am_I,
shared_T[threadIdx.x].Ti, Ti_opp);
}
if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_MASS_FLAG)) {
printf("iVertex %d iNeigh %d massflux_out %1.9E NTaddn.N %1.9E \n"
"n_prev n_next %1.9E %1.9E prev_relv next_relv %1.9E %1.9E \n"
"vxy_prev %1.8E %1.8E vxy_next %1.9E %1.9E edge_normal %1.8E %1.8E \n"
"v_overall prev %1.8E %1.8E next %1.8E %1.8E \n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n",
iVertex, iNeigh, totalmassflux_out.n, NTaddn.N,
n_prev, n_next, prev_relv, next_relv,
vxy_prev.x, vxy_prev.y, vxy_next.x, vxy_next.y, edge_normal.x, edge_normal.y,
v_overall_prev.x, v_overall_prev.y, v_overall_next.x, v_overall_next.y);
};
}
else {
// downwind cell: collect flux later.
}
};
};
endpt0 = endpt1;
vxy_prev = vxy_next;
v_overall_prev = v_overall_next;
Ti_prev = Ti_opp;
Ti_opp = Ti_next;
Te_prev = Te_opp;
Te_opp = Te_next;
iNeigh = iNeighNext;
};
NTrates NTplus;
NTplus.N = -totalmassflux_out.n;
NTplus.Nn = 0.0; // -totalmassflux_out.n_n;
NTplus.NeTe = -totalheatflux_out.Te;
NTplus.NiTi = -totalheatflux_out.Ti;
NTplus.NnTn = 0.0; // -totalheatflux_out.Tn;
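// Sign convention (explanatory note): totalmassflux_out / totalheatflux_out only accumulated
// the edges where this cell was upwind, so NTplus subtracts that outflow here; the matching
// inflow for each downwind neighbour was stored (without a minus) into p_store_flux above
// and is presumably added to the neighbour in a later collection routine.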
//
// if (TEST) printf("\n%d : NTplus.NiTi %1.10E NTplus.N %1.10E Tsrc.i %1.10E nsrc.n %1.10E\n"
// "NTplus.NiTi/NTplus.N (avg temp of those coming/going) %1.10E\n"
// "NTplus.NiTi/N (ROC Ti) %1.10E\n"
// "NTplus.NiTi/NiTi (elasticity of T?) %1.10E \n"
// "NTplus.N/N (elasticity of N) %1.10E \n\n",
// CHOSEN, NTplus.NiTi, NTplus.N,
// Tsrc.Ti, nsrc.n,
// NTplus.NiTi/NTplus.N,
// NTplus.NiTi/(AreaMajor*nsrc.n),
// NTplus.NiTi/(AreaMajor*nsrc.n*Tsrc.Ti),
// NTplus.N/(AreaMajor*nsrc.n)
// );
memcpy(p_NTadditionrates + iVertex, &NTplus, sizeof(NTrates));
// ROUTINE MUST BE CALLED FIRST - WE ZEROED OUT NEUTRAL DATA.
// What we need now:
// * Cope with non-domain vertex
p_div_v[iVertex] = Integrated_div_v / AreaMajor;
p_Integrated_div_v_overall[iVertex] = Integrated_div_v_overall;
}
else {
p_div_v[iVertex] = 0.0;
p_Integrated_div_v_overall[iVertex] = 0.0;
};
}
__global__ void kernelAccumulateNeutralAdvectiveMassHeatRateNew(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
long * __restrict__ p_izNeigh_vert,
short * __restrict__ p_who_am_I_to_my_neighbours,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major, // use T vertex itself to infer what T to use.
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
ShardModel * __restrict__ p_n_shard_major,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v_n,
NTrates * __restrict__ p_store_flux
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 -- assume we still use this.
// only reused what, 3 times? 2*2 per major thread -- do we?
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // +2*2 per major thread
// __shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // +4
// could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ f64 shared_T[threadsPerTileMajor]; // +3 ... = 15 total. Can run 128 threads.
// Should we just pre-average this on tris to make life easier for ourselves? No, because we also need T_opp.
__shared__ f64_12 shared_shards[threadsPerTileMajor];
// probably stick with loading in tri positions if we can.
// Probably can't manage to run this routine with 256 threads at a time. Can't fit 8 doubles / thread to shared.
// 24 doubles/thread to get 256 threads so we still need to chuck some out!
// 48K shared is default.
// scrap v_n, do neutral in sequence.
/////////////////////////////////////////////////////////////////////////////
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
f64_vec3 v_n[2];
memcpy(&v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
shared_vxy[2 * threadIdx.x] = v_n[0].xypart();
shared_vxy[2 * threadIdx.x + 1] = v_n[1].xypart();
shared_T[threadIdx.x] = p_T_src_major[iVertex].Tn;
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMajor = StartMajor + threadsPerTileMajor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
// Now that we have T from vertices, we'll need to define it on INS tri -- gulp. Just use avg of here and opp.
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
// see above -- no shift vertex outside certain radius
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[iVertex].n), MAXNEIGH * sizeof(f64));
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
// is this USED?
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 T_next, T_prev;
short inext, i = 0;
// Initial scenario: use triangle 0 & triangle 1. These face at neighbour 1. prev neigh = 0.
long iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 0];
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
T_prev = shared_T[iNeigh - StartMajor];
} else {
T_prev = p_T_src_major[iNeigh].Tn;
};
if (T_prev == 0.0) T_prev = shared_T[threadIdx.x];
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
//nvals nvls = shared_n_upwind[iTri - StartMinor];
//n_prev = nvls.n;
//nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
//v_n_prev = shared_v_n[iTri - StartMinor];
//Te_prev = shared_T[iTri - StartMinor].Te;
//Ti_prev = shared_T[iTri - StartMinor].Ti;
//Tn_prev = shared_T[iTri - StartMinor].Tn;
}
else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
//nvals n_upwind = p_n_upwind_minor[iTri];
//n_prev = n_upwind.n;
//nn_prev = n_upwind.n_n;
vxy_prev = p_v_n_minor[iTri].xypart();
//v_n_prev = p_v_n_minor[iTri].xypart();
//T3 Tuse = p_T_upwind_minor[iTri];
//Te_prev = Tuse.Te;
//Ti_prev = Tuse.Ti;
//Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
//v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
//v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
// We're going to need the position of our vertex?
f64 totalmassflux_out = 0.0;
f64 totalheatflux_out = 0.0;
f64 Integrated_div_v = 0.0;
f64 AreaMajor = 0.0;
f64 T_opp;
iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 1]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
T_opp = shared_T[iNeigh - StartMajor];
} else {
T_opp = p_T_src_major[iNeigh].Tn;
};
if (T_opp == 0.0) T_opp = shared_T[threadIdx.x];
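// The loop below mirrors the charged-species loop in the kernel above, but for neutrals:
// velocities come from p_v_n_minor, T is the scalar Tn, and only the Nn / NnTn flux
// components are produced (stored to p_store_flux for the downwind neighbour exactly as
// before). See the notes in the previous kernel for the edge quadrature and upwind logic.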
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0; // i,inext are the triangle indices
// Let's assume inext is the index of iNeigh but we should spit out lists to check this.
short inext2 = inext + 1; if (inext2 == tri_len) inext2 = 0;
long iNeighNext = p_izNeigh_vert[iVertex*MAXNEIGH + inext2]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeighNext >= StartMajor) && (iNeighNext < EndMajor)) {
T_next = shared_T[iNeighNext - StartMajor];
}
else {
T_next = p_T_src_major[iNeighNext].Tn; // despite the name, this array holds the T to use, not the source T.
};
if (T_next == 0.0) T_next = shared_T[threadIdx.x];
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
vxy_next = shared_vxy[iTri - StartMinor];
// Te_next = shared_T[iTri - StartMinor].Te;
// Ti_next = shared_T[iTri - StartMinor].Ti;
// Tn_next = shared_T[iTri - StartMinor].Tn;
// We are going to need a separate attempt to get at T from vertices, to use in Simpson.
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
vxy_next = p_v_n_minor[iTri].xypart();
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
n_prev = shared_shards[threadIdx.x].n[i];
n_next = shared_shards[threadIdx.x].n[inext];
// Totally smash up the runtime:
char neighflag = p_info_minor[iNeigh + BEGINNING_OF_CENTRAL].flag;
if ((neighflag == DOMAIN_VERTEX) ||
((info.flag == DOMAIN_VERTEX) && (neighflag == OUTERMOST)))
{
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
f64 prev_relv = (vxy_prev - v_overall_prev).dot(edge_normal);
f64 next_relv = (vxy_next - v_overall_next).dot(edge_normal);
// Insulator: If 1 or 2 of the vertices comes out at T=0 then what?
// Fill in with our own value. Upwind.
// But absolutely ensure we are not looking out of domain at vertex! And we are throwing away anything that flowed into OUTERMOST. Too bad about that, leave it.
// Without loading info for the vertex we look at, we do not know if it's out of domain. So we have to rely on vr=0 at insulator.
// upwind? :
if (prev_relv + next_relv > 0.0) {
// Note: you are not upwind for both neutrals & ions necessarily.
// For now:
f64 T_prevavg = THIRD*(shared_T[threadIdx.x] + T_prev + T_opp);
f64 T_nextavg = THIRD*(shared_T[threadIdx.x] + T_next + T_opp);
// LIMIT flux T to 2*ours.
if (T_prevavg > 2.0*shared_T[threadIdx.x]) T_prevavg = 2.0*shared_T[threadIdx.x];
if (T_nextavg > 2.0*shared_T[threadIdx.x]) T_nextavg = 2.0*shared_T[threadIdx.x];
f64 T_avg = 0.5*(shared_T[threadIdx.x] + T_opp);
if (T_avg > 2.0*shared_T[threadIdx.x]) T_avg = 2.0*shared_T[threadIdx.x];
f64 NTaddnN, NTaddnNT;
NTaddnN = 0.25*n_prev*prev_relv + 0.25*n_next*next_relv
+ 0.5*0.25*(n_prev + n_next)*(prev_relv + next_relv);
NTaddnNT = 0.25*n_prev*T_prevavg*prev_relv + 0.25*n_next*T_nextavg*next_relv
+ 0.125*(n_prev + n_next)*T_avg*(prev_relv + next_relv);
totalmassflux_out += NTaddnN;
totalheatflux_out += NTaddnNT;
// Maybe there's a speedup we can use.
// Now save to downwind cell:
short who_am_I = p_who_am_I_to_my_neighbours[iVertex*MAXNEIGH + inext];
p_store_flux[iNeigh*MAXNEIGH + who_am_I].Nn = NTaddnN;
p_store_flux[iNeigh*MAXNEIGH + who_am_I].NnTn = NTaddnNT;
// NOTE WE DID N*O*T ADD A MINUS.
// if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_MASS_FLAG)) {
// printf("iVertex %d iNeigh %d massflux_out %1.9E NTaddn.Nn %1.9E \n"
// "nn_prev nn_next %1.9E %1.9E prev_relv next_relv %1.9E %1.9E \n"
// "vxy_prev %1.8E %1.8E vxy_next %1.9E %1.9E edge_normal %1.8E %1.8E \n"
// "v_overall prev %1.8E %1.8E next %1.8E %1.8E who_am_I %d \n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n",
// iVertex, iNeigh, totalmassflux_out, NTaddnN,
// n_prev, n_next, prev_relv, next_relv,
// vxy_prev.x, vxy_prev.y, vxy_next.x, vxy_next.y, edge_normal.x, edge_normal.y,
// v_overall_prev.x, v_overall_prev.y, v_overall_next.x, v_overall_next.y, who_am_I);
// };
}
else {
// downwind cell: collect flux later.
};
};
};
endpt0 = endpt1;
vxy_prev = vxy_next;
v_overall_prev = v_overall_next;
T_prev = T_opp;
T_opp = T_next;
iNeigh = iNeighNext;
};
p_NTadditionrates[iVertex].Nn = -totalmassflux_out;
p_NTadditionrates[iVertex].NnTn = -totalheatflux_out;
p_div_v_n[iVertex] = Integrated_div_v / AreaMajor;
}
else {
p_div_v_n[iVertex] = 0.0;
};
}
__global__ void kernelCreateLinearRelationship(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma
)
{
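// Explanatory note: this kernel rearranges the Ohm's-law result into the linear form
//   Azdot(iMinor) ~= Azdot0(iMinor) + gamma(iMinor) * Lap_Az(iMinor)
// by cancelling the Lap_Az contribution that the populate-Ohms step already folded into
// v0 / Azdot, so that an implicit solve for Az can work with (Azdot0, gamma) alone.
// The MIDPT_A branches below switch between half-weighted (midpoint) and fully weighted
// treatment of the c^2*Lap_Az term.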
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
if (((TESTTRI)) && (0)) printf("\nv0.vez before remove Lapcontrib %1.14E \n", v0.vez);
v0.viz += 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -0.5*eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
if (((TESTTRI)) && (0)) printf("\n##############\nviz before remove LapAzcontrib %1.14E Lapcontrib %1.14E \n\n",
v0.viz - 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i,
-0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i
);
// Inadequate because we need to take account of the effect of Lap Az on vez0 via viz0.
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine, more complicated still.
if (((TESTTRI)) && (0)) printf("own part of effect (we cancel): %1.14E \n"
"via viz (we cancel): coeff %1.14E vizeffect %1.14E\n",
0.5*eoverm*h_use*h_use* c* Lap_Az_used / denom_e,
coeff_of_vez_upon_viz,
-0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i);
if (((TESTTRI)) && (0)) printf("v0.vez after remove Lapcontrib %1.14E \n", v0.vez);
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
if (((TESTTRI)) && (0)) printf("vez_1 with Ezcontrib %1.14E sigma_e_zz %1.14E Ez %1.14E vizeffect %1.14E \n", vez_1,
Ohms.sigma_e_zz, Ez_strength, Ohms.sigma_i_zz * Ez_strength);
// Cancelled Lap contrib from vez1 here.
// Be sure we know that makes sense. Is that what we missed on CPU?
nvals n_use = p_n_minor[iMinor];
// AAzdot_k.Azdot +=
// h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)); // INTERMEDIATE
// p_AAdot_intermediate[iMinor] = AAzdot_k; // not k any more
#ifdef MIDPT_A
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot
- 0.5*h_use*c*c*Lap_Az_used // cancel out half what PopOhms did!
// + h_use * ROCAzdot_antiadvect[iMinor] // we did this as part of PopOhms.
// + h_use *c*2.0*PI* q*n_use.n*(v_src.viz - v_src.vez) // we did this as part of PopOhms
+ h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// HALVED:
f64 viz0_coeff_on_Lap_Az = -0.25*h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = 0.25* h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
#else
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot
- h_use*c*c*Lap_Az_used // cancel out what PopOhms did!
// + h_use * ROCAzdot_antiadvect[iMinor] // we did this as part of PopOhms.
// + h_use *c*2.0*PI* q*n_use.n*(v_src.viz - v_src.vez) // we did this as part of PopOhms
+ h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1);
f64 viz0_coeff_on_Lap_Az = -0.5*h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = 0.5* h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
#endif
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
if (((TESTTRI)) && (0)) printf("vez0_coeff_on_Lap undivided %1.14E coeff_viz_on_vez %1.14E viz0_coeff %1.14E denom_e %1.14E\n",
0.5* h_use*h_use*eoverm*c,
coeff_of_vez_upon_viz,
viz0_coeff_on_Lap_Az,
denom_e
);
#ifdef MIDPT_A
p_gamma[iMinor] = h_use*c*c*(0.5 + 0.5*FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
#else
p_gamma[iMinor] = h_use*c*c*(1.0 + 0.5*FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
#endif
// This represents the effect on Azdot of LapAz.
// Did we get this wrong for CPU also?
if (((TESTTRI)) && (0)) {
printf("kernelCLR %d: Azdot_intermed %1.14E Lap_Az_used %1.14E Lapcontrib cancel %1.14E Azdot0 %1.14E\n",
CHOSEN, p_AAdot_intermediate[iMinor].Azdot, Lap_Az_used,
-h_use*c*c*Lap_Az_used,
p_Azdot0[iMinor]);
printf("Jcontrib1 %1.14E viz1 %1.14E vez1 %1.14E\n",
h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1),
viz_1, vez_1);
printf("gamma %1.14E components: n %1.14E viz0coeff %1.14E vez0coeff %1.14E",
p_gamma[iMinor],
n_use.n, viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az);
}
}
else {
// In PopOhms:
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]
// NO: + 4.0*PI*Jz);
// p_AAdot_intermediate[iMinor] = temp; //
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
// We should find a way to set these to exactly what we need for it to work,
// at TriMesh::Initialise and then propagated through the Invoke function.
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
#ifdef MIDPT_A
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot - h_use*0.5*c*c*Lap_Az_used
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use*0.5 * c*c;
#else
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot - h_use*c*c*Lap_Az_used
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
#endif
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
if (((TESTTRI)) && (0)) printf("kernelCLR %d: Azdot_intermed %1.14E Lap_Az_used %1.14E Azdot0 %1.14E\n",
CHOSEN, p_AAdot_intermediate[iMinor].Azdot, Lap_Az_used, p_Azdot0[iMinor]);
// Note that for frills these will simply not be used.
};
}
__global__ void kernelCreateLinearRelationshipBwd(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ ROCAzdotduetoAdvection
)
{
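// Explanatory note: backward (fully implicit) variant of the kernel above -- the midpoint
// 0.5 factors are absent, and h_use * ROCAzdotduetoAdvection is folded into Azdot0, which
// (per the comment below) belongs only on cycles that advect.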
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
v0.viz += qoverM*h_use*h_use* c* Lap_Az_used / denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine, more complicated still.
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
nvals n_use = p_n_minor[iMinor];
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use * ROCAzdotduetoAdvection[iMinor] // our prediction contains this
+ h_use *c*4.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// ROCAzdot_antiadvect --- we need this to be in there only
// on cycles that we do advection
// So do the addition in here.
f64 viz0_coeff_on_Lap_Az = h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
p_gamma[iMinor] = h_use*c*c*(1.0 + FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
} else {
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot + h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
};
}
__global__ void kernelCreateLinearRelationshipBwd_noadvect(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma
)
{
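// Explanatory note: same as kernelCreateLinearRelationshipBwd but with the advection
// rate-of-change term omitted (no ROCAzdotduetoAdvection input), for cycles that do not
// advect the mesh.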
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
v0.viz += qoverM*h_use*h_use* c* Lap_Az_used/denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine, more complicated still.
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
nvals n_use = p_n_minor[iMinor];
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use *c*4.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// if ((iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN) || (iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN2))
// printf("%d : AAdot_k.Azdot %1.10E n_use.n %1.9E viz1 %1.9E vez1 %1.9E\n",
// iMinor, p_AAdot_k[iMinor].Azdot, n_use.n, viz_1, vez_1);
// ROCAzdot_antiadvect --- we need this to be in there only
// on cycles that we do advection
// So do the addition in here.
// THIS WAS IN ERROR.
f64 viz0_coeff_on_Lap_Az = -h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
p_gamma[iMinor] = h_use*c*c*(1.0 + FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
} else {
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
};
}
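// Usage sketch (assumption, not taken from this file): a caller solving the implicit Az
// equation would treat the outputs of the kernels above as
//   Azdot_new = p_Azdot0[iMinor] + p_gamma[iMinor] * Lap(Az_new)[iMinor]
// and, for a backward-Euler-style step, iterate Az_new until
//   Az_new - Az_k - h_use * Azdot_new  ->  0.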
/*
__global__ void kernelPopulateOhmsLaw(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
nvals * __restrict__ p_one_over_n,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ ROCAzdotduetoAdvection,
// Now going to need to go through and see this set 0 or sensible every time.
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave,
bool const bUse_dest_n_for_Iz,
nvals * __restrict__ p_n_dest_minor) // for turning on save of these denom_ quantities
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, ROCAzdot_antiadvect, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n));
// p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y/(AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart()/ (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
if (((TESTTRI))) printf("\nGPU %d vxyk %1.10E %1.10E aMAR_i.y %1.10E MAR.y %1.10E 1/n %1.10E Area %1.10E\n", iMinor,
v0.vxy.x, v0.vxy.y,
h_use * (m_i*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.y,
p_one_over_n[iMinor].n,
AreaMinor);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor); // UM WHY WAS THIS NEGATIVE
// + !!!!
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (((TESTTRI))) printf("\nGPU %d a:MAR_e %1.10E %1.10E MAR.y %1.10E 1/n %1.10E Area %1.10E\n", iMinor,
h_use * (m_e*MAR.x/ (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y/ (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.y,
p_one_over_n[iMinor].n, AreaMinor);
// if (((TESTTRI)))
// printf("GPU %d WITH MAR v0.vxy %1.14E %1.14E\n", CHOSEN, v0.vxy.x, v0.vxy.y);
// printf("GPU %d data_k %1.10E %1.10E MAR %1.10E %1.10E\n", CHOSEN, vie_k.vxy.x, vie_k.vxy.y,
// MAR.x, MAR.y);
// printf("GPU %d n %1.12E AreaMinor %1.12E \n", CHOSEN, n_use.n, AreaMinor);
// }
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
}
vn0.x += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.x - vie_k.vxy.x)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.x - vie_k.vxy.x);
vn0.y += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.y - vie_k.vxy.y)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.y - vie_k.vxy.y);
vn0.z += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.z - vie_k.vez)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.z - vie_k.viz);
denom = 1.0 + h_use * 0.5*M_e_over_en* (cross_section_times_thermal_en*n_use.n)
+ 0.5*h_use*M_i_over_in* (cross_section_times_thermal_in*n_use.n);
vn0 /= denom; // It is now the REDUCED value
if (((TESTTRI)))
printf("GPU %d vn0 %1.9E %1.9E %1.9E denom %1.14E \n", CHOSEN, vn0.x, vn0.y, vn0.z, denom);
ohm.beta_ne = 0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
f64 ROCAzdot_antiadvect = ROCAzdotduetoAdvection[iMinor];
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
// %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
// Here is where we should be using v_use:
// We do midpoint instead? Why not? Thus allowing us not to load v_use.
// %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
v0.vxy +=
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x]
- (h_use / (2.0*(m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(vie_k.vxy - v_n_src.xypart() - vn0.xypart());
if (((TESTTRI))) printf("GPU %d vzgradAz contrib_k %1.10E %1.10E vez_k viz_k %1.9E %1.9E gradAz %1.9E %1.9E\n", iMinor,
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x].x,
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x].y, vie_k.vez, vie_k.viz,
grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y);
denom = 1.0 + (h_use / (2.0*(m_i + m_e)))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*(1.0 - ohm.beta_ne - ohm.beta_ni);
v0.vxy /= denom;
//
if (((TESTTRI)))
printf("GPU %d v0.vxy %1.14E %1.14E denom %1.14E \n"
"nu_in_MT %1.14E nu_en_MT %1.14E beta_ne %1.14E \n",
CHOSEN, v0.vxy.x, v0.vxy.y, denom,
cross_section_times_thermal_in*n_use.n_n, cross_section_times_thermal_en*n_use.n_n, ohm.beta_ne);
ohm.beta_xy_z = (h_use * q / (2.0*c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x];
/////////////////////////////////////////////////////////////////////////////// midpoint
// if (((TESTTRI))) printf("ohm.beta_xy_z %1.14E \n", ohm.beta_xy_z);
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// if (nu_ei_effective != nu_ei_effective) printf("nu_ei NaN: omega %1.8E %1.8E nu_eHeart %1.8E nu_eiBar %1.8E\n",
// omega[threadIdx.x].x, omega[threadIdx.x].y, nu_eHeart, nu_eiBar);
AAdot AAzdot_k = p_AAdot_src[iMinor];
//if ((iPass == 0) || (bFeint == false))
{
// if (((TESTTRI)) && (0)) printf("viz0: %1.14E\n", v0.viz);
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz); // nonzero
v0.viz +=
-0.5*h_use*qoverMc*(2.0*AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz
+ FOURPI_OVER_C*0.5 * q*n_use.n*(vie_k.viz - vie_k.vez)))
- 0.5*h_use*qoverMc*(vie_k.vxy + v0.vxy).dot(grad_Az[threadIdx.x]);
if (((TESTTRI))) {
printf("viz0 I: %1.14E contribs:\n", v0.viz);
printf(" Azdotk %1.14E \n ROC %1.14E\n JviaAzdot %1.14E\n lorenzmag %1.14E\n",
-0.5*h_use*qoverMc*(2.0*AAzdot_k.Azdot),
-0.5*h_use*qoverMc*h_use * ROCAzdot_antiadvect,
-0.5*h_use*qoverMc*h_use * c*c*(FOURPI_OVER_C*0.5 * q*n_use.n*(vie_k.viz - vie_k.vez)),
-0.5*h_use*qoverMc*(vie_k.vxy + v0.vxy).dot(grad_Az[threadIdx.x])
);
printf("due to LapAz: %1.14E = %1.6E %1.6E %1.6E %1.6E\n",
-0.5*h_use*qoverMc*h_use *c*c*LapAz,
h_use*h_use*0.5,
qoverMc,
c*c,
LapAz); // == 0
};
}
//else {
// viz0 = data_k.viz
// - h_use * MomAddRate.ion.z / (data_use.n*AreaMinor)
// - 0.5*h_use*qoverMc*(2.0*data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(TWOPIoverc * q*data_use.n*(data_k.viz - data_k.vez)))
// - 0.5*h_use*qoverMc*(data_k.vxy + vxy0).dot(grad_Az[threadIdx.x]);
// };
//
// Still omega_ce . Check formulas.
//
v0.viz +=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
// if (((TESTTRI))) printf("viz0 with thermal force %1.14E \n", v0.viz);
v0.viz += -h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(vie_k.viz - v_n_src.z - vn0.z) // THIS DOESN'T LOOK RIGHT
+ h_use * 0.5*(moverM)*nu_ei_effective*(vie_k.vez - vie_k.viz);
if (((TESTTRI))) printf("viz0 contrib i-n %1.14E contrib e-i %1.14E\nviz0 %1.14E\n",
-h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(vie_k.viz - v_n_src.z - vn0.z),
h_use * 0.5*(moverM)*nu_ei_effective*(vie_k.vez - vie_k.viz), v0.viz
);
denom = 1.0 + h_use * h_use*M_PI*qoverM*q*n_use.n + h_use * 0.5*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni) + h_use * 0.5*moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*M_PI*qoverM*q*n_use.n
+ 0.5*h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * 0.5*moverM*nu_ei_effective) / denom;
if (((TESTTRI2))) printf("vez0 %1.14E \n", v0.vez);
v0.vez +=
h_use * 0.5*qovermc*(2.0*AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect
+ h_use * c*c*(LapAz
+ 0.5*FOURPI_Q_OVER_C*n_use.n*(vie_k.viz + v0.viz - vie_k.vez))) // ?????????????????
+ 0.5*h_use*qovermc*(vie_k.vxy + v0.vxy + v0.viz * ohm.beta_xy_z).dot(grad_Az[threadIdx.x]);
if (((TESTTRI2)))
printf(" %d v0.vez %1.14E Azdotctb %1.14E antiadvect %1.14E LapAzctb %1.14E \n"
"%d JviaAzdot %1.14E lorenzmag %1.14E \n",
iMinor, v0.vez, h_use * 0.5*qovermc*2.0*AAzdot_k.Azdot,
h_use * 0.5*qovermc*h_use * ROCAzdot_antiadvect,
h_use * 0.5*qovermc*h_use * c*c*LapAz,
iMinor,
h_use * 0.5*qovermc*h_use * c*c* 0.5*FOURPI_Q_OVER_C*n_use.n*(vie_k.viz + v0.viz - vie_k.vez),
0.5*h_use*qovermc*(vie_k.vxy + v0.vxy + v0.viz * ohm.beta_xy_z).dot(grad_Az[threadIdx.x])
);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * 0.5*qovermc*h_use * c*c*0.5*FOURPI_Q_OVER_C*n_use.n
+ 0.5*h_use*qovermc*( ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])+ qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
if (((TESTTRI2)))
printf("%d v0.vez TF contrib : %1.14E nu_eiBar %1.14E nu_eHeart %1.14E \n"
"%d omega %1.14E %1.14E %1.14E\n",iMinor,
-1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)),
nu_eiBar, nu_eHeart, iMinor,
omega[threadIdx.x].x, omega[threadIdx.x].y, qovermc*BZ_CONSTANT);
// could store this from above and put opposite -- dividing by m_e instead of m_i
v0.vez += -0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vie_k.vez - v_n_src.z - vn0.z - ohm.beta_ni * v0.viz)
- 0.5*h_use*nu_ei_effective*(vie_k.vez - vie_k.viz - v0.viz);
// implies:
effect_of_viz0_on_vez0 +=
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + 0.5*h_use*nu_ei_effective;
if (
//(iMinor == 11761 + BEGINNING_OF_CENTRAL) ||
//(iMinor == 11616 + BEGINNING_OF_CENTRAL) ||
//(iMinor == 11762 + BEGINNING_OF_CENTRAL) ||
((TESTTRI2)) )
{
printf("%d cross_section_times_thermal_en %1.10E n_use.n_n %1.10E vezk %1.10E vez0 %1.10E Mnoverne %1.10E nu_ei_effective %1.10E \n",
iMinor, cross_section_times_thermal_en, n_use.n_n,
vie_k.vez, v0.vez,
M_n_over_ne, nu_ei_effective);
}
if (((TESTTRI2)))
printf("v0.vez contribs e-n e-i: %1.14E %1.14E v0.viz %1.14E\n",
-0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vie_k.vez - v_n_src.z - vn0.z - ohm.beta_ni * v0.viz),
- 0.5*h_use*nu_ei_effective*(vie_k.vez - vie_k.viz - v0.viz),
v0.viz);
denom = 1.0 + (h_use*h_use*M_PI*q*eoverm*n_use.n
+ 0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ 0.5*h_use*nu_ei_effective*(1.0 - beta_ie_z);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use * 0.5*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ 0.5*h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
// if (((TESTTRI)1) || ((TESTTRI)2))
//printf("GPU %d vez0 before divide %1.14E \n", iMinor, v0.vez);
//
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
//if (v0.vez != v0.vez) {
// printf("iMinor %d v0.vez %1.10E ohm.sigma_e %1.10E denom %1.10E \n"
// "%1.10E %1.10E %1.10E %1.10E n %1.10E Te %1.10E\n" ,
// iMinor, v0.vez, ohm.sigma_e_zz, denom,
// h_use*h_use*M_PI*q*eoverm*n_use.n,//*(1.0 - beta_ie_z) // this was ok
// 0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*(1.0 - beta_ie_z), // this was not ok
// 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z),
// 0.5*h_use*nu_ei_effective,//*(1.0 - beta_ie_z) // this was not ok -- even though n,T come out ok
// n_use.n, T.Te);
//}
if ( ((TESTTRI2)))
printf("GPU %d v0.vez %1.14E denom %1.14E \n"
"ohm.sigma_e_zz %1.14E n_use %1.10E nn %1.10E Te %1.10E\n"
"%d %1.12E %1.12E %1.12E %1.12E %1.12E \n"
"%d denom %1.14E : %1.12E %1.12E %1.12E %1.12E\n",
iMinor, v0.vez, denom,
ohm.sigma_e_zz,
n_use.n,n_use.n_n, T.Te, iMinor, -h_use * eoverm,
h_use * h_use*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz,
h_use * 0.5*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz,
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz,
0.5*h_use*nu_ei_effective*ohm.sigma_i_zz,
iMinor, denom,
(h_use*h_use*M_PI*q*eoverm*n_use.n)*(1.0 - beta_ie_z),
(0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z),
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z),
0.5*h_use*nu_ei_effective*(1.0 - beta_ie_z)
);
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
} else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
// Think maybe we should get rid of most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
if (bUse_dest_n_for_Iz) {
f64 ndest = p_n_dest_minor[iMinor].n;
Iz[threadIdx.x] = q*AreaMinor*ndest*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*ndest*(ohm.sigma_i_zz - ohm.sigma_e_zz);
if (((TESTTRI2))) {
printf( "ndest %1.12E sigma_zz/Area %1.12E AreaMinor %1.12E\n\n",
ndest, q*ndest*(ohm.sigma_i_zz - ohm.sigma_e_zz), AreaMinor);
}
} else {
// On intermediate substeps, the interpolated n that applies halfway through the substep is a reasonable choice...
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
// I'm sure we can do better on this. But we also might prefer to excise a lot of this calc from the subcycle.
if (((TESTTRI2))) {
printf("n_use.n %1.12E sigma_zz/Area %1.12E AreaMinor %1.12E\n\n",
n_use.n, q*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz), AreaMinor);
}
}
// Totally need to be skipping the load of an extra n.
// ^^ old remark.
// But it's too messy never loading it. t_half means changing all the
// Iz formula to involve v_k. Don't want that.
// if (blockIdx.x == 340) printf("%d: %1.14E %1.14E \n",
// iMinor, q*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz), sigma_zz[threadIdx.x]);
// On iPass == 0, we need to do the accumulate.
// p_Azdot_intermediate[iMinor] = Azdot_k
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*n_use.n*(data_k.viz - data_k.vez)); // INTERMEDIATE
//if ((0) && ((TESTTRI))) printf("******************* AAzdot_k.Azdot %1.14E \n", AAzdot_k.Azdot);
AAzdot_k.Azdot +=
h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz +
0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)); // INTERMEDIATE
p_AAdot_intermediate[iMinor] = AAzdot_k; // not k any more
//Iz_k[threadIdx.x] = q*n_use.n*(vie_k.viz - vie_k.vez)*AreaMinor;
//if ((0) && ((TESTTRI))) {
// printf("\n!!! kernelPopOhms GPU %d: \n******* Azdot_intermediate %1.14E vie_k %1.14E %1.14E\n"
// "antiadvect %1.10E Lapcontrib %1.13E Jcontrib_k %1.14E\n\n",
// CHOSEN, p_AAdot_intermediate[iMinor].Azdot,
// vie_k.viz, vie_k.vez,
// h_use * ROCAzdot_antiadvect,
// h_use * c*c*LapAz,
// h_use * c*c*0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)
// );
//}
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
} else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
{
p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
} else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
AAdot temp = p_AAdot_src[iMinor];
temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
p_AAdot_intermediate[iMinor] = temp; //
};
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + s - 1];
Iz[threadIdx.x] += Iz[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
// Wish to make the Jz contribs to Azdot on each side of the ins exactly equal in L1,
// meant making this long routine even longer with collecting Iz_k.
}
*/
__global__ void kernelCollectOhmsGraphs(
structural * __restrict__ p_info_major,
f64_vec3 * __restrict__ p_MAR_ion_pressure_major,
f64_vec3 * __restrict__ p_MAR_ion_visc_major,
f64_vec3 * __restrict__ p_MAR_elec_pressure_major, // need to distinguish viscous from pressure part.
f64_vec3 * __restrict__ p_MAR_elec_visc_major,
f64_vec3 * __restrict__ p_MAR_elec_ionization_major,
f64_vec3 * __restrict__ p_B_major,
v4 * __restrict__ p_vie_k, // ALL MAJOR
v4 * __restrict__ p_vie_kplus,
f64_vec2 * __restrict__ p_GradTe_major,
nvals * __restrict__ p_n_major_use,
T3 * __restrict__ p_T_major_use,
AAdot * __restrict__ p_AAdot_kplus,
f64 * __restrict__ p_AreaMinor, // EXCEPT THIS ONE
f64 * __restrict__ p_Ohmsgraph_0, // elastic effective frictional coefficient zz
f64 * __restrict__ p_Ohmsgraph_1, // ionization effective frictional coefficient zz
f64 * __restrict__ p_Ohmsgraph_2, // 2 is combined y pressure accel rate
f64 * __restrict__ p_Ohmsgraph_3,// 3 is q/(M+m) Ez -- do we have
f64 * __restrict__ p_Ohmsgraph_4, // 4 is thermal force accel
f64 * __restrict__ p_Ohmsgraph_5, // T_zy
f64 * __restrict__ p_Ohmsgraph_6, // T_zz
f64 * __restrict__ p_Ohmsgraph_7, // T acting on pressure
f64 * __restrict__ p_Ohmsgraph_8, // T acting on electromotive
f64 * __restrict__ p_Ohmsgraph_9, // T acting on thermal force
f64 * __restrict__ p_Ohmsgraph_10, // prediction vez-viz
f64 * __restrict__ p_Ohmsgraph_11, // difference of prediction from vez_k
f64 * __restrict__ p_Ohmsgraph_12, // progress towards eqm: need vez_k+1
f64 * __restrict__ p_Ohmsgraph_13, // viscous acceleration of electrons and ions (z)
f64 * __restrict__ p_Ohmsgraph_14, // Prediction of Jz
f64 * __restrict__ p_Ohmsgraph_15, // sigma zy
f64 * __restrict__ p_Ohmsgraph_16, // sigma zz
f64 * __restrict__ p_Ohmsgraph_17, // sigma zz times electromotive
f64 * __restrict__ p_Ohmsgraph_18 // Difference of prediction from Jz predicted.
)
{
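// Explanatory note: diagnostic kernel only -- it decomposes the z Ohm's law at each vertex
// into the graphable pieces listed in the parameter comments (friction coefficients,
// pressure, electromotive and thermal-force accelerations, the transfer coefficients
// Tzy / Tzz, and the resulting quasi-steady predictions of vez - viz and Jz). It writes
// only the p_Ohmsgraph_* arrays and does not modify any state used by the solver.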
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64_vec2 gradTe[threadsPerTileMinor];
f64_vec3 omega_ce;
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, AreaMinor;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_k[iVertex];
// f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_major_use[iVertex];
AreaMinor = p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL];
f64_vec3 MAR_elec, MAR_ion;
memcpy(&MAR_elec, p_MAR_elec_ionization_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_1[iVertex] = (MAR_elec.z / (n_use.n*AreaMinor*vie_k.vez));
// ionization effective frictional coefficient zz
memcpy(&MAR_ion, p_MAR_ion_pressure_major + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec_pressure_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_2[iVertex] = (m_i*MAR_ion.y + m_e*MAR_elec.y)/((m_i + m_e)*(n_use.n*AreaMinor));
memcpy(&MAR_ion, p_MAR_ion_visc_major + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec_visc_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_13[iVertex] = (m_i*MAR_ion.y + m_e*MAR_elec.y) / ((m_i + m_e)*(n_use.n*AreaMinor));
// v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
//v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_major_use[iVertex];
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
}
}
gradTe[threadIdx.x] = p_GradTe_major[iVertex];
omega_ce = qovermc*p_B_major[iVertex];
omega_ce.z = BZ_CONSTANT*qovermc;
f64 nu_ei_effective =
nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))));
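// nu_ei_effective = nu_eiBar * [1 - 0.9*nu_eiBar*(nu_eHeart^2 + omega_z^2) / (nu_eHeart*(nu_eHeart^2 + |omega_ce|^2))]:
// the e-i momentum-transfer rate with the magnetized-transport correction. Ohmsgraph_0 below then adds the
// mass-weighted e-n rate to give the total effective zz friction frequency acting on (vez - viz).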
p_Ohmsgraph_0[iVertex] = nu_ei_effective + M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n);
AAdot AAzdot_kplus = p_AAdot_kplus[iVertex];
p_Ohmsgraph_3[iVertex] = -(q * (m_i + m_e)/(m_e*m_i))*(Ez_strength*GetEzShape(info.pos.modulus()) - AAzdot_kplus.Azdot / c);
//v0.viz +=
// 1.5*h_use*nu_eiBar*(
// (omega_ce.x*qovermc*BZ_CONSTANT - nu_eHeart * omega_ce.y)*gradTe[threadIdx.x].x +
// (omega_ce.y*qovermc*BZ_CONSTANT + nu_eHeart * omega_ce.x)*gradTe[threadIdx.x].y) /
// (m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
//v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
// denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
// + h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
// h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
// + h_use *moverM*nu_ei_effective;
// v0.viz /= denom;
// implies:
//f64 effect_of_viz0_on_vez0 =
// h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
// + h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
// remember it's ve-vi :
p_Ohmsgraph_4[iVertex] = -1.5*nu_eiBar*((
(omega_ce.x*qovermc*BZ_CONSTANT - nu_eHeart * omega_ce.y)*gradTe[threadIdx.x].x +
(omega_ce.y*qovermc*BZ_CONSTANT + nu_eHeart * omega_ce.x)*gradTe[threadIdx.x].y) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))*(1.0 / m_i + 1.0 / m_e);
// Now it's time to work out T_zy and T_zz:
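// The a_, b_, c_ defined next let the steady friction/magnetization balance on w = vez - viz be written as
// (a_ * omega omega^T +/- b_ * [omega x] + c_ * I) w = F (the sign of the omega-cross term depends on convention);
// Tzy and Tzz below are then the (z,y) and (z,z) entries of the inverse of that matrix, whose determinant
// factorizes as (a_*|omega|^2 + c_) * (b_^2*|omega|^2 + c_^2). Hence w_z = Tzy*F_y + Tzz*F_z for the
// per-unit-mass forcings collected in Ohmsgraph_2..4.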
f64 Tzy, Tzz;
f64 a_ = -0.9*nu_eiBar*nu_eiBar / (nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
f64 b_ = 1.0 - a_*nu_eHeart;
f64 c_ = M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) + nu_eiBar + nu_eHeart*nu_eHeart*a_;
Tzy = ((a_*b_*omega_ce.dot(omega_ce) + b_*c_)*omega_ce.x + (b_*b_ - a_*c_)*omega_ce.y*omega_ce.z) /
((a_*omega_ce.dot(omega_ce) + c_)*(b_*b_*omega_ce.dot(omega_ce) + c_*c_));
Tzz = (c_*c_ + a_*c_*omega_ce.dot(omega_ce) + (b_*b_-a_*c_)*omega_ce.z*omega_ce.z) /
((a_*omega_ce.dot(omega_ce) + c_)*(b_*b_*omega_ce.dot(omega_ce) + c_*c_));
// Notice: if omega_ce = 0 then Tzz = c_*c_ / (c_*c_*c_) = 1/c_.
// At omega = 0, a_ = -0.9*nu_eiBar^2/nu_eHeart^3, so that limit is
// Tzz = 1/( nu_en_MT(mass-weighted) + nu_eiBar - 0.9*nu_eiBar^2/nu_eHeart ), with nu_eHeart = 1.87*nu_eiBar + nu_en_visc:
// the reciprocal of the effective e-n + e-i collision frequency. Remember this acts on (vez - viz).
p_Ohmsgraph_5[iVertex] = Tzy;
p_Ohmsgraph_6[iVertex] = Tzz;
p_Ohmsgraph_7[iVertex] = Tzy*p_Ohmsgraph_2[iVertex];
p_Ohmsgraph_8[iVertex] = Tzz*p_Ohmsgraph_3[iVertex];
p_Ohmsgraph_9[iVertex] = Tzz*p_Ohmsgraph_4[iVertex];
p_Ohmsgraph_10[iVertex] = p_Ohmsgraph_7[iVertex] + p_Ohmsgraph_8[iVertex] + p_Ohmsgraph_9[iVertex];
if (iVertex == VERTCHOSEN) printf("\n\nOhmsgraphs info %d : omega %1.8E %1.8E %1.8E abc %1.8E %1.8E %1.8E\n"
"nu_eiBar nu_eHeart nu_en %1.10E %1.10E %1.10E nu 1 and 2 %1.9E %1.9E ; \naccels 2 3 4 %1.9E %1.9E %1.9E\n"
"Tzy Tzz %1.9E %1.9E prediction %1.9E \n",
iVertex, omega_ce.x, omega_ce.y, omega_ce.z,
a_, b_, c_,
nu_eiBar, nu_eHeart, n_use.n_n*s_en_visc*electron_thermal,
p_Ohmsgraph_0[iVertex],p_Ohmsgraph_1[iVertex], p_Ohmsgraph_2[iVertex], p_Ohmsgraph_3[iVertex], p_Ohmsgraph_4[iVertex],
Tzy, Tzz, p_Ohmsgraph_10[iVertex]
);
v4 vie_kplus = p_vie_kplus[iVertex];
p_Ohmsgraph_11[iVertex] = p_Ohmsgraph_10[iVertex] - vie_k.vez + vie_k.viz;
p_Ohmsgraph_12[iVertex] = vie_kplus.vez - vie_kplus.viz - vie_k.vez + vie_k.viz;
p_Ohmsgraph_14[iVertex] = -q*n_use.n*p_Ohmsgraph_10[iVertex];
p_Ohmsgraph_15[iVertex] = eoverm*n_use.n*Tzy;
p_Ohmsgraph_16[iVertex] = eoverm*n_use.n*Tzz;
p_Ohmsgraph_17[iVertex] = -q*n_use.n*Tzz*p_Ohmsgraph_3[iVertex];
p_Ohmsgraph_18[iVertex] = p_Ohmsgraph_14[iVertex] - q*n_use.n*(vie_kplus.viz - vie_kplus.vez);
} else {
// Non-domain triangle or vertex
// ==============================
}
}
__global__ void MeasureAccelz(
structural * __restrict__ p_info,
v4 * __restrict__ p_vie_initial,
v4 * __restrict__ p_vie_final,
f64_vec3 * __restrict__ p_v_nk,
f64_vec3 * __restrict__ p_v_nkplus1,
f64 const h_use, // substep
f64_vec2 * __restrict__ pGradAz,
f64_vec2 * __restrict__ pGradTe,
AAdot * __restrict__ p_AAdot,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ pLapAz,
nvals * __restrict__ p_n_central,
T3 * __restrict__ p_T_central,
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_MAR_neut,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_arelz,
f64 * __restrict__ p_MAR_ion_effect,
f64 * __restrict__ p_MAR_elec_effect,
f64 * __restrict__ p_Ezext_electromotive,
f64 * __restrict__ p_inductive_electromotive,
f64 * __restrict__ p_vxB,
f64 * __restrict__ p_thermal_force_effect,
f64 * __restrict__ p_friction_neutrals,
f64 * __restrict__ p_friction_ei,
f64 * __restrict__ p_sum_of_effects,
f64 * __restrict__ p_difference
) {
long iVertex = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_i = p_vie_initial[iVertex];
v4 vie_f = p_vie_final[iVertex];
f64 accel;
p_arelz[iVertex] = (vie_f.vez - vie_f.viz - vie_i.vez + vie_i.viz) / h_use;
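// arelz is the realized acceleration of the relative z-velocity (vez - viz) over the substep. The rest of
// this routine rebuilds that acceleration term by term (MAR, Ez, induction, v x B, thermal force, friction);
// p_difference at the end should be ~0 if the Ohm's-law solve was consistent.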
f64_vec2 Grad_Az = pGradAz[iVertex];
f64_vec2 gradTe = pGradTe[iVertex];
f64 Azdot = p_AAdot[iVertex].Azdot;
f64 dAzdt_k = p_AAdot_k[iVertex].Azdot;
f64 AreaMinor = p_AreaMinor[iVertex];
nvals n_use = p_n_central[iVertex];
if (iVertex == VERTCHOSEN) printf("iVertex = %d BOC = %d sum = %d \n",
iVertex, BEGINNING_OF_CENTRAL, iVertex + BEGINNING_OF_CENTRAL);
f64_vec3 MAR, MAR_ion, MAR_elec;
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
p_MAR_ion_effect[iVertex] = -MAR_ion.z / (n_use.n*AreaMinor); // note minus
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3));
p_MAR_elec_effect[iVertex] = MAR_elec.z / (n_use.n*AreaMinor);
p_Ezext_electromotive[iVertex] = -(eoverm + qoverM) * GetEzShape(info.pos.modulus()) * Ez_strength;
p_inductive_electromotive[iVertex] = (eoverm + qoverM) *Azdot / c;
p_vxB[iVertex] = (qovermc+qoverMc)*Grad_Az.dot(vie_f.vxy);
f64_vec3 omega_ce;
omega_ce.x = p_B[iVertex].x*qovermc;
omega_ce.y = p_B[iVertex].y*qovermc;
omega_ce.z = BZ_CONSTANT*qovermc;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_central[iVertex];
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
s_en_MT *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n);
s_in_MT *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n); // returns factor 1.0 if n+nn > 1.0e14.
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(n_use.n, MINIMUM_NU_EI_DENSITY)*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
f64 nu_ei_effective =
nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce) )) );
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
}
p_thermal_force_effect[iVertex] =
// viz part:
-(1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
)
// vez part:
- 1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
f64_vec3 v_nk = p_v_nk[iVertex];
f64_vec3 v_nkplus1 = p_v_nkplus1[iVertex];
// Is this the right sign?
p_friction_neutrals[iVertex] =
M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n)*
(p_v_nkplus1[iVertex].z - vie_f.vez)
- M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*
(p_v_nkplus1[iVertex].z - vie_f.viz);
p_friction_ei[iVertex] = -(1.0 + moverM)*nu_ei_effective*(vie_f.vez-vie_f.viz);
p_sum_of_effects[iVertex] =
p_MAR_ion_effect[iVertex] + p_MAR_elec_effect[iVertex] +
p_Ezext_electromotive[iVertex] + p_inductive_electromotive[iVertex] +
p_vxB[iVertex] + p_thermal_force_effect[iVertex] +
p_friction_neutrals[iVertex] + p_friction_ei[iVertex]
;
// should equal acceleration that obtained. Is it different??
p_difference[iVertex] = p_arelz[iVertex] - p_sum_of_effects[iVertex];
if (TEST_VS_MATRIX2) {
printf("vie_f.vez %1.10E vie_i.vez %1.10E vie_f.viz %1.8E vie_i.viz %1.8E \narelz %1.13E hsub %1.9E \n sum %1.13E diff %1.8E \n\n",
vie_f.vez, vie_i.vez, vie_f.viz, vie_i.viz, p_arelz[iVertex], h_use, p_sum_of_effects[iVertex], p_difference[iVertex]);
printf("effects: %1.8E %1.8E Ez %1.8E %1.8E vxB %1.8E thermalforce %1.8E fric %1.8E %1.8E\n\n######################\n\n",
p_MAR_ion_effect[iVertex], p_MAR_elec_effect[iVertex],
p_Ezext_electromotive[iVertex], p_inductive_electromotive[iVertex],
p_vxB[iVertex], p_thermal_force_effect[iVertex],
p_friction_neutrals[iVertex], p_friction_ei[iVertex]);
//// Now consider an intermediate formula:
//f64 beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
// + h_use*qoverMc*(Grad_Az.dot(ohm.beta_xy_z))
// + h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
// + h_use * moverM*nu_ei_effective) / denom;
//f64 denom = 1.0 + h_use*h_use*q*eoverm*FOUR_PI*n_use.n
// + M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)
// + h_use*nu_ei_effective
// + h_use*qovermc*(Grad_Az.dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
// ;
//f64 vez_test_2 = vie_i.vez
// + h_use*p_MAR_elec_effect[iVertex]
// + h_use*(-(eoverm) * GetEzShape(info.pos.modulus()) * Ez_strength)//p_Ezext_electromotive[iVertex]
// + h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz))
// + h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)) //p_vxB[iVertex]
// + h_use*(-1.5*nu_eiBar*(
// (omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
// (omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
// (m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
// - M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(-v_nkplus1.z)
// - h_use*nu_ei_effective*(-vie_f.viz);
//vez_test_2 /= denom;
//if (iVertex == VERTCHOSEN) {
// printf("MAR elec component %1.12E \n"
// "Ez ext component %1.12E \n",
// "dAz/dt component %1.12E vie_f.viz %1.12E\n",
// "v x B component %1.12E \n",
// "thermal force component %1.12E \n",
// "- M_n_over_ne*h_use*nu_en*(-v_nkplus1.z) %1.12E \n"
// "- h_use*nu_ei_effective*(-vie_f.viz) %1.12E \n"
// "denom %1.12E \n------------------\nresult vez = %1.12E",
// h_use*p_MAR_elec_effect[iVertex],
// h_use*(-(eoverm)* GetEzShape(info.pos.modulus()) * Ez_strength),
// h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz)),
// vie_f.viz,
// h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)), //p_vxB[iVertex]
// h_use*(-1.5*nu_eiBar*(
// (omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
// (omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
// (m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
// ,
// -M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(-v_nkplus1.z),
// -h_use*nu_ei_effective*(-vie_f.viz),
// denom, vez_test_2
// );
//}
// Recreate vie_f from components:
f64 vdifftest_1 = vie_i.vez - vie_i.viz + h_use*(p_MAR_ion_effect[iVertex] + p_MAR_elec_effect[iVertex] +
p_Ezext_electromotive[iVertex] + p_inductive_electromotive[iVertex] +
p_vxB[iVertex] + p_thermal_force_effect[iVertex] +
p_friction_neutrals[iVertex] + p_friction_ei[iVertex]);
// Produce vez 1 : make it simpler.
f64 vez_1 = vie_i.vez + h_use*p_MAR_ion_effect[iVertex]
+h_use*p_MAR_elec_effect[iVertex]
+ h_use*(-(eoverm)* GetEzShape(info.pos.modulus()) * Ez_strength)//p_Ezext_electromotive[iVertex]
+ h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz))
+ h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)) //p_vxB[iVertex]
+ h_use*(-1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
- M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(vie_f.vez-v_nkplus1.z)
- h_use*nu_ei_effective*(vie_f.vez-vie_f.viz);
// f64 vdiff2 = vez_test_2 - vie_f.viz;
printf("vie_i.vez vie_f.vez diff | vdifftest_1 "//veztest_2 vdiff2
"\n%1.14E %1.14E %1.14E %1.14E vez1 %1.14E\n",
vie_i.vez, vie_f.vez, vie_f.vez-vie_f.viz, vdifftest_1, //vez_test_2, vdiff2,
vez_1);
printf("Azdot_k+1 %1.14E calc'd %1.14E dA/dt_k %1.10E LapAz %1.10E 4pi/c Jz %1.10E n %1.14E vie_f.viz %1.14E vie_f.vez %1.14E\n",
Azdot,
dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*(vie_f.viz - vie_f.vez)),
dAzdt_k, pLapAz[iVertex], FOURPI_OVER_C*q*n_use.n*(vie_f.viz - vie_f.vez),
n_use.n, vie_f.viz, vie_f.vez);
// Result : difference 2 is closer to the program difference. Diff 1 is quite different.
// Explain diff between diff 2 and diff 1.
// Magic up matrix eqn:
if (TEST_VS_MATRIX2) {
memcpy(&MAR, p_MAR_neut + iVertex, sizeof(f64_vec3));
// 1. Need to work out vn coefficients !!!
f64 nu_ne_MT = cross_section_times_thermal_en*n_use.n;
f64 nu_ni_MT = cross_section_times_thermal_in*n_use.n;
f64 nu_in_MT = cross_section_times_thermal_in*n_use.n_n;
f64 nu_en_MT = cross_section_times_thermal_en*n_use.n_n;
f64 denom = 1.0 + h_use*M_e_over_en*nu_ne_MT +
h_use*M_i_over_in*nu_ni_MT;
f64_vec3 vn0 = (v_nk + h_use*MAR / (AreaMinor*n_use.n_n))
/ denom;
f64 beta_ne = h_use*M_e_over_en*nu_ne_MT / denom;
f64 beta_ni = h_use*M_i_over_in*nu_ni_MT / denom;
printf("v_nk.xy %1.14E %1.14E MAR.xy %1.14E %1.14E Nn %1.14E Area %1.14E denom %1.14E\n", v_nk.x,
v_nk.y, MAR.x, MAR.y, (AreaMinor*n_use.n_n), AreaMinor, denom);
printf("vn0 %1.14E %1.14E %1.14E beta_ni %1.14E beta_ne %1.14E \n",
vn0.x, vn0.y, vn0.z, beta_ni, beta_ne);
// vx, vy :
// from bwd eqn :
// given Lap Az and EzStrength, (Azdot -- do both ways) :
// the Azdot we got given used the vie_f that got calculated, so no we have to go back to Lap Az.
// Do without Ez terms. Put into separate sigma_izz, sigma_ezz
// 2. vx equation?
f64 temp = (h_use / (m_i + m_e))*
(m_n*m_i*nu_in_MT / (m_i + m_n) + m_e*m_n*nu_en_MT / (m_e + m_n));
f64 M_i_over_ie = m_i / (m_i + m_e);
f64 M_e_over_ie = m_e / (m_i + m_e);
f64 M_n_over_in = m_n / (m_i + m_n);
f64 M_n_over_en = m_n / (m_e + m_n);
f64_vec2 vxy0 = vie_i.vxy
+ h_use*M_i_over_ie*MAR_ion.xypart()/(AreaMinor*n_use.n)
+ h_use*M_e_over_ie*MAR_elec.xypart()/(AreaMinor*n_use.n)
+ temp*vn0.xypart(); // added
printf("vxy0 components:\n"
"vie_i.vxy %1.14E %1.14E MAR_ion_contrib %1.14E %1.14E \n"
"MAR_elec_contrib %1.14E %1.14E temp %1.14E vn0contrib %1.14E %1.14E\n\n",
vie_i.vxy.x, vie_i.vxy.y,
h_use*M_i_over_ie*MAR_ion.x / (AreaMinor*n_use.n),
h_use*M_i_over_ie*MAR_ion.y / (AreaMinor*n_use.n),
h_use*M_e_over_ie*MAR_elec.x / (AreaMinor*n_use.n),
h_use*M_e_over_ie*MAR_elec.y / (AreaMinor*n_use.n),
temp,
temp*vn0.x, temp*vn0.y
);
f64 vx_viz = (h_use*q / (c*(m_i + m_e)))*Grad_Az.x;
f64 vx_vez = (-h_use*q / (c*(m_i + m_e)))*Grad_Az.x;
f64 vy_viz = (h_use*q / (c*(m_i + m_e)))*Grad_Az.y;
f64 vy_vez = (-h_use*q / (c*(m_i + m_e)))*Grad_Az.y;
f64 vxy_vxy = -temp*(1.0- beta_ne - beta_ni);
vxy_vxy -= 1.0; // move LHS over to RHS so we've got 0 = .
printf(" ... 1 vx vy viz vez \n");
printf(" vx %1.14E %1.14E %1.14E %1.14E %1.14E \n"
" vy %1.14E %1.14E %1.14E %1.14E %1.14E \n",
vxy0.x, vxy_vxy, 0.0, vx_viz, vx_vez,
vxy0.y, 0.0, vxy_vxy, vy_viz, vy_vez
);
// VERIFY AGAIN THAT THIS IS GIVING SAME COEFFICIENTS.
// Work systematically: reduce vxy equation and sub in.
denom = -vxy_vxy; // move to LHS ..
OhmsCoeffs ohm;
v4 v0;
v0.vxy = vxy0 / denom;
ohm.beta_xy_z.x = vx_viz/denom;
ohm.beta_xy_z.y = vy_viz/denom;
printf("=-----------------\nv0.vxy %1.14E %1.14E beta_xy_z %1.14E %1.14E \n---------------\n",
v0.vxy.x, v0.vxy.y, ohm.beta_xy_z.x, ohm.beta_xy_z.y);
f64 EzExt = Ez_strength*GetEzShape(info.pos.modulus());
// Worry afterwards about what sigma Ez does.
// Do this in stages.
f64 viz0 = vie_i.viz + h_use*MAR_ion.z / (AreaMinor*n_use.n)
+ h_use*qoverM*EzExt
- h_use*qoverMc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex]))
+ h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
+ h_use*M_n_over_in*nu_in_MT*vn0.z;
printf("viz0 components: vie_i.viz %1.14E \n"
"from MAR_ion.z : %1.14E | from EzExt %1.14E \n"
"from Azdot_k+hc^2LapAz %1.14E \n"
"thermalforceterm %1.14E \n"
"vn0.z effect %1.14E \n"
"total = viz0 : %1.14E \n",
vie_i.viz, h_use*MAR_ion.z / (AreaMinor*n_use.n),
h_use*qoverM*EzExt,
-h_use*qoverMc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex])),
h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))),
h_use*M_n_over_in*nu_in_MT*vn0.z, viz0
);
f64 viz_vx = -h_use*qoverMc*Grad_Az.x;
f64 viz_vy = -h_use*qoverMc*Grad_Az.y;
f64 viz_viz = -h_use*qoverM*h_use*4.0*M_PI*q*n_use.n
-h_use*M_n_over_in*nu_in_MT*(1.0-beta_ni)
- h_use*moverM*nu_ei_effective
;
f64 viz_vez = h_use*qoverM*h_use*4.0*M_PI*q*n_use.n
+ h_use*M_n_over_in*nu_in_MT*(beta_ne)
+ h_use*moverM*nu_ei_effective;
// Think about how it will be solved.
// Eqn: vxvx vx + vxvy vy + .. = -vx0.
// And here vxvx should include -1 vx
viz_viz -= 1.0;
printf(" viz %1.14E %1.14E %1.14E %1.14E %1.14E \n",
viz0, viz_vx, viz_vy, viz_viz, viz_vez);
f64 vez0 = vie_i.vez + h_use*MAR_elec.z / (AreaMinor*n_use.n)
- h_use*eoverm*EzExt
+ h_use*qovermc*(dAzdt_k + h_use*c*c*pLapAz[iVertex])
- h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
+ h_use*M_n_over_en*nu_en_MT*vn0.z;
printf("\nvez0 : vez_k %1.14E \n"
"MAReffect %1.14E \n"
"EzExteffect %1.14E \n"
"Azdot_effect %1.14E \n"
"thermalforceeffect %1.14E \n"
"vn0.z_effect %1.14E \n"
"vez0 %1.14E\n",
vie_i.vez,
h_use*MAR_elec.z / (AreaMinor*n_use.n),
-h_use*eoverm*EzExt,
h_use*qovermc*(dAzdt_k + h_use*c*c*pLapAz[iVertex]),
-h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))),
h_use*M_n_over_en*nu_en_MT*vn0.z,
vez0
);
f64 vez_vx = h_use*qovermc*Grad_Az.x;
f64 vez_vy = h_use*qovermc*Grad_Az.y;
f64 vez_viz = h_use*eoverm*h_use*4.0*M_PI*q*n_use.n
+ h_use*M_n_over_en*nu_en_MT*beta_ni
+ h_use*nu_ei_effective;
f64 vez_vez = -h_use*eoverm*h_use*4.0*M_PI*q*n_use.n
- h_use*M_n_over_en*nu_en_MT*(1.0-beta_ne)
- h_use*nu_ei_effective;
printf("\n vez_vx %1.14E vez_vez %1.14E \n"
"vezvez components: hhq4piqn/m %1.14E hnu_en %1.14E hnu_ei %1.14E\n\n",
vez_vx, vez_vez, -h_use*eoverm*h_use*4.0*M_PI*q*n_use.n,
- h_use*M_n_over_en*nu_en_MT*(1.0 - beta_ne),
- h_use*nu_ei_effective
);
// FIGURE IT OUT : WHAT DOES THIS GIVE AND DOES IT SATISFY BWD EQN
// It practically is the bwd eqn
// If the result from our PopOhms is different, should be possible to detect exactly why.
// We do not take Azdot_k+1 as given but this also should be possible to compute
// given the v_k+1 from this.
// This is bound to work.
vez_vez -= 1.0;
printf(" vez %1.14E %1.14E %1.14E %1.14E %1.14E \n",
vez0, vez_vx, vez_vy, vez_viz, vez_vez);
printf(" ----------------------------------------------------\n");
// Work systematically: reduce vxy equation and sub in.
denom = -vxy_vxy; // move to LHS ..
vxy0.x /= denom;
vxy0.y /= denom;
vx_viz /= denom;
vx_vez /= denom;
vy_viz /= denom;
vy_vez /= denom;
// Substitute in:
viz0 += viz_vx*vxy0.x + viz_vy*vxy0.y;
viz_viz += viz_vx*vx_viz + viz_vy*vy_viz;
viz_vez += viz_vx*vx_vez + viz_vy*vy_vez;
vez0 += vez_vx*vxy0.x + vez_vy*vxy0.y;
vez_viz += vez_vx*vx_viz + vez_vy*vy_viz;
vez_vez += vez_vx*vx_vez + vez_vy*vy_vez;
printf(" viz0 vizviz vizvez %1.14E %1.14E %1.14E \n",
viz0, viz_viz, viz_vez);
printf(" vez0 vezviz vezvez %1.14E %1.14E %1.14E \n",
vez0, vez_viz, vez_vez);
printf(" ----------------------------------------------------\n");
printf("REDUCE viz:\n");
viz0 /= viz_viz;
printf("viz0 %1.14E vizvez %1.14E \n", viz0, viz_vez);
printf(" ----------------------------------------------------\n");
};
};
};
}
__global__ void kernelPopulateBackwardOhmsLaw_noadvect(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
//AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave)
{
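// Per minor cell this sets up the backward-Euler Ohm's law solve: v0 / vn0 are the velocities that would
// result with Ez_strength = 0, OhmsCoeffs holds the linear response to Ez and the couplings between species,
// and the block-level partial sums Iz0 / sigma_zz let Ez_strength be chosen afterwards so that the total
// current matches the prescribed Iz.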
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
if (TEST_VS_MATRIX) {
printf("%d VS_MAT: v_nk.y %1.14E MAR.y %1.14E Nn %1.14E Area %1.14E\n",
iMinor, vn0.y, MAR.y, (AreaMinor*n_use.n_n), AreaMinor);
printf("%d VS_MAT: v_nk.x %1.14E MAR.x %1.14E Nn %1.14E \n",
iMinor, vn0.x, MAR.x, (AreaMinor*n_use.n_n));
};
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n)); // p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y / (AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
vn0.z += h_use * (MAR.z / (AreaMinor*n_use.n_n));
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz_k %1.14E viz0 with MAR %1.14E \n",
iMinor, vie_k.viz, v0.viz);
}
if (TESTACCEL_X) printf("%d vx_k %1.9E with MARi %1.9E n %1.8E N %1.8E\n", iMinor, vie_k.vxy.x, v0.vxy.x,
n_use.n, n_use.n*AreaMinor);
if (TESTACCEL2) printf("%d vy_k %1.9E with MARi %1.9E MAR.y %1.9E\n", iMinor-BEGINNING_OF_CENTRAL, vie_k.vxy.y, v0.vxy.y,
MAR.y);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
if (TESTVEZ) printf("%d vez_k %1.9E MAR.z %1.9E N %1.9E \n",
iMinor, vie_k.vez, MAR.z, (n_use.n*AreaMinor));
if (TESTACCEL_X) printf("%d v0x with MARi+e %1.9E\n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.x);
if (TESTACCEL2) printf("%d v0y with MARi+e %1.9E MAR.y \n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.y, MAR.y);
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (TESTVEZ) printf("\nGPU %d MAR: changexy %1.10E %1.10E vezchange %1.10E Area %1.10E v0.vez %1.9E vie_k.vez %1.9E\n", iMinor,
h_use * (m_e*MAR.x / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * MAR.z / (n_use.n*AreaMinor),
AreaMinor, v0.vez, vie_k.vez);
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY,n_use.n)*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH*n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH*n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
};
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
cross_section_times_thermal_en *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n);
cross_section_times_thermal_in *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n); // returns factor 1.0 if n+nn > 1.0e14.
if (TESTVEZ) printf("Uplift factor %1.9E n %1.8E %1.8E\n", ArtificialUpliftFactor(n_use.n, n_use.n_n), n_use.n, n_use.n_n);
// DEBUG:
if (0)//iMinor == CHOSEN)
{
printf("%d xs_therm_in en %1.8E %1.8E nn %1.8E n %1.8E s_in en %1.8E %1.8E i-n therm %1.8E Uplift %1.8E\n",
iMinor,
cross_section_times_thermal_in, cross_section_times_thermal_en,
n_use.n_n, n_use.n, s_en_MT, s_in_MT, ionneut_thermal,
ArtificialUpliftFactor_MT(n_use.n, n_use.n_n));
// Did not trigger? What gives?
};
};
denom = 1.0 + h_use*M_e_over_en*(cross_section_times_thermal_en*n_use.n)
+ h_use*M_i_over_in*(cross_section_times_thermal_in*n_use.n);
if (TESTVNX) printf("%d v_n.x before divide %1.10E \n", iMinor, vn0.x);
if (TESTVNY) printf("%d v_n.y before divide %1.10E \n", iMinor, vn0.y);
vn0 /= denom; // It is now the REDUCED value
if (TESTVNX) printf("%d v_n.x after divide %1.10E \n", iMinor, vn0.x);
if (TESTVNY) printf("%d v_n.y after divide %1.10E \n", iMinor, vn0.y);
//if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) {
// printf("vn_k %1.9E %1.9E %1.9E vn0 %1.9E %1.9E %1.9E denom %1.9E\n",
// v_n_src.x, v_n_src.y, v_n_src.z, vn0.x, vn0.y, vn0.z, denom);
//};
ohm.beta_ne = h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
if (TEST_VS_MATRIX) printf("VS_MAT: vn0 %1.14E %1.14E %1.14E beta_ni %1.14E beta_ne %1.14E denom %1.14E\n",
vn0.x, vn0.y, vn0.z, ohm.beta_ni, ohm.beta_ne, denom);
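// The backward neutral momentum equation has now been reduced to
//    v_n^{k+1} = vn0 + beta_ni * v_i^{k+1} + beta_ne * v_e^{k+1},
// which is why (1 - beta_ne - beta_ni) and the separate beta_* terms appear when v_n is substituted
// into the ion and electron equations below.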
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
// debug:
if (LapAz != LapAz) printf("----------\n%d LapAz NaN\n---------\n", iMinor);
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
v0.vxy +=
(h_use / ((m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
( vn0.xypart()); // this reflects v_n and the next reflects minus itself
denom = 1.0 + (h_use / (m_i + m_e))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(1.0 - ohm.beta_ne - ohm.beta_ni);
if (TEST_VS_MATRIX) printf("VS_MAT: vxy0 before divide %1.14E %1.14E denom %1.14E\n",
v0.vxy.x, v0.vxy.y, denom);
v0.vxy /= denom;
if (TESTACCEL_X) printf("%d v0x with neut soak %1.9E\n", iMinor, v0.vxy.x);
if (TESTACCEL2) printf("%d v0y with neut soak %1.9E\n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.y);
ohm.beta_xy_z = (h_use * q / (c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x]; // coeff on viz-vez
if (TEST_VS_MATRIX) printf("VS_MAT: vxy0 %1.14E %1.14E beta_xy_z %1.14E %1.14E \n\n",
v0.vxy.x, v0.vxy.y, ohm.beta_xy_z.x, ohm.beta_xy_z.y);
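// At this point vxy^{k+1} = v0.vxy + ohm.beta_xy_z * (viz^{k+1} - vez^{k+1}): the in-plane velocity is now
// a function of the relative z-velocity via the grad_Az (v x B) coupling, so only the two z-equations remain.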
// ================================================================================================
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
AAdot AAzdot_k = p_AAdot_src[iMinor];
v0.viz +=
-h_use*qoverMc*(AAzdot_k.Azdot + h_use * c*c*LapAz)
- h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x]);// v x B
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz0 %1.14E Azdot+ccLapAz term %1.14E vxy.gradAz term %1.14E \n",
iMinor, v0.viz,
-h_use*qoverMc*(AAzdot_k.Azdot + h_use * c*c*LapAz),
-h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x])
);
}
// Still omega_ce . Check formulas.
v0.viz +=
1.5*h_use*nu_eiBar*(
(omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz0 %1.14E thermalforceterm %1.14E vn0.z for friction %1.14E \n",
iMinor, v0.viz,
1.5*h_use*nu_eiBar*(
(omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]))),
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z
);
}
denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
+ h_use *moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (TEST_VS_MATRIX)
printf("Denom %1.14E = 1+components:\n"
"h_use*h_use*4.0*M_PI*qoverM*q*n_use.n %1.14E \n"
"h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) %1.14E \n"
"h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*(1.0-ohm.beta_ni) %1.14E \n"
"h_use *moverM*nu_ei_effective %1.14E \n"
"------------------------------------- new value of viz0 %1.14E \n"
,
denom,
h_use * h_use*4.0*M_PI*qoverM*q*n_use.n,
h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)),
h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*(1.0 - ohm.beta_ni),
h_use *moverM*nu_ei_effective,
v0.viz
);
//if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * moverM*nu_ei_effective) / denom;
if (TEST_VS_MATRIX)
printf("ohm.sigma_i_zz %1.14E = hq/M / denom \n"
"beta_ie_z %1.14E components before divide by denom:\n"
"h_use*h_use*4.0*M_PI*qoverM*q*n_use.n %1.14E \n"
"h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) %1.14E \n"
"h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*ohm.beta_ne %1.14E \n"
"h_use*moverM*nu_ei_effective %1.14E \n"
"-----------------------------------------------\n",
ohm.sigma_i_zz,
beta_ie_z,
h_use*h_use*4.0*M_PI*qoverM*q*n_use.n,
h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)),
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne,
h_use * moverM*nu_ei_effective);
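// The ion z-equation has now been reduced to
//    viz^{k+1} = v0.viz + ohm.sigma_i_zz * Ez + beta_ie_z * vez^{k+1},
// so beta_ie_z carries every route by which vez feeds back on viz: the self-consistent 4*pi*Jz term in Azdot,
// the v x B coupling through beta_xy_z, neutral friction via beta_ne, and e-i friction.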
if (TESTOHMS) printf("%d v0.vez %1.12E before Azdot LapAz and JxB\n", iMinor, v0.vez);
// ====================================================================
// vez:
v0.vez +=
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz))
+ h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz ).dot(grad_Az[threadIdx.x]); // v x B
if (TESTVEZ) printf("%d AzdotLapAzcomponent(v0.viz) %1.12E v0.viz %1.12E \n"
"v x B term (v0) %1.12E \n--------------------------\n"
, iMinor,
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz)), v0.viz,
h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x])
);
if (TESTVEZ) printf("%d vh_use *qovermc*(AAzdot_k.Azdot) %1.14E \nhhqc_overm(LapAz) %1.14E LapAz %1.14E \n"
"hh4piqqoverm n viz %1.14E hq/mc v0.vxy.gradAz %1.14E hq/mc beta_xyz viz.gradAz %1.14E \n"
"v0.vxy %1.12E %1.12E grad Az %1.12E %1.12E \n",
iMinor, h_use *qovermc*(AAzdot_k.Azdot),
h_use *qovermc*(h_use * c*c*(LapAz )),
LapAz,
h_use *qovermc*(h_use * c*c*(FOURPI_Q_OVER_C*n_use.n*v0.viz)),
h_use*qovermc*v0.vxy.dot(grad_Az[threadIdx.x]),
h_use*qovermc*(ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x]),
v0.vxy.x, v0.vxy.y, grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y
);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
+ h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x])); // from the instruction above
if (TESTOHMS) printf("%d v0.vez %1.14E before thermal force\n", iMinor, v0.vez);
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
if (TESTVEZ) printf("%d thermal force %1.14E \n", iMinor, -1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// could store this from above and put opposite -- dividing by m_e instead of m_i
// overdue..?
if (TESTVEZ) printf("%d v0.vez %1.12E MARKER1 \n", iMinor, v0.vez);
effect_of_viz0_on_vez0 +=
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective;
// Apparently we thought to save this INSTEAD of putting it into vez0
// So the question is-- - have we deliberately excluded the effect from vez0 IN THE CASE that we are setting up a linear relationship ?
// NEUE:
v0.vez += (h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective)*v0.viz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n)*vn0.z;
denom = 1.0 + (h_use*h_use*4.0*M_PI*q*eoverm*n_use.n
+ h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ h_use*nu_ei_effective*(1.0 - beta_ie_z);
if (TEST_VS_MATRIX)
printf("\nPOPOHMS denom_e %1.14E components: \nhh4piqqn/m*(1.0-beta_ie_z) %1.14E grad_Az_dot_beta_xy_z %1.14E \n"
"nu_en_without_ni_ie(1-beta_ne) %1.14E nu_en_ni_ie %1.14E\n"
"hnu_ei_eff %1.14E times_minus_beta_ie_z %1.14E\n\n",
denom,
h_use*h_use*4.0*M_PI*q*eoverm*n_use.n*(1.0 - beta_ie_z),
h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*(1.0 - beta_ie_z),
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne),
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(-ohm.beta_ni*beta_ie_z),
h_use*nu_ei_effective,
h_use*nu_ei_effective*(- beta_ie_z)
);
if (TESTVEZ) printf("%d v0.vez %1.12E nu_ei_effective %1.12E v0.viz %1.12E \n"
"beta_ie_z %1.12E nu_en %1.12E denom %1.12E\n", iMinor, v0.vez, nu_ei_effective, v0.viz, beta_ie_z,
M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n), denom);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*4.0*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use *qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
if (TESTVEZ) printf("%d grad_Az %1.9E %1.9E \n", iMinor, grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y);
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
if (TESTVEZ) printf("%d v0.vez %1.12E after divide\n", iMinor, v0.vez);
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
} else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
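// From here the solve is linear in the applied field:
//    viz^{k+1} = v0.viz + ohm.sigma_i_zz * Ez_strength,
//    vez^{k+1} = v0.vez + ohm.sigma_e_zz * Ez_strength,
// with EzShape already folded into the sigmas.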
if (TESTVEZ) printf("%d final v0.vez %1.12E sigma %1.12E \n", iMinor, v0.vez, ohm.sigma_e_zz);
// Think maybe we should get rid of most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
// BRING THIS BACK AND CHECK IT ALL OUT:
// if (Iz[threadIdx.x] > 0.0) printf("%d : Iz %1.8E n_use %1.8E v0.viz %1.8E v0.vez %1.8E\n",
// iMinor, Iz[threadIdx.x], n_use.n, v0.viz, v0.vez);
}
else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
memset(&(p_v0_dest[iMinor]), 0, sizeof(v4)); // no velocity!
memset(&(p_vn0_dest[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_OhmsCoeffs_dest[iMinor]), 0, sizeof(OhmsCoeffs));
// if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
// {
// p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
// }
// else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
// p_AAdot_intermediate[iMinor] = temp; //
// };
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + s - 1];
Iz[threadIdx.x] += Iz[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
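// The block totals written above are the pieces of Iz = sum over cells of q n Area (viz - vez)
// = Iz0_total + sigma_total * Ez_strength. A minimal host-side sketch of how Ez_strength could then be
// chosen to hit a prescribed current -- the names Iz_prescribed, numBlocks and the host copies
// p_Iz0_host / p_sigma_zz_host are hypothetical, not the actual driver code:
//
// f64 Iz0_total = 0.0, sigma_total = 0.0;
// for (long iBlock = 0; iBlock < numBlocks; iBlock++) {
// Iz0_total += p_Iz0_host[iBlock]; // sum of q n Area (viz0 - vez0)
// sigma_total += p_sigma_zz_host[iBlock]; // sum of q n Area (sigma_i_zz - sigma_e_zz)
// };
// f64 Ez_strength_new = (Iz_prescribed - Iz0_total) / sigma_total;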
// Wish to make the Jz contribs to Azdot on each side of the ins exactly equal in L1,
// meant making this long routine even longer with collecting Iz_k.
}
__global__ void kernelPopulateResiduals(
f64 * __restrict__ pLapAz,
nvals * __restrict__ p_n_minor,
v4 * __restrict__ p_vie,
f64 * __restrict__ p_residual
) {
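// Residual of the z-component of Ampere's law for the vector potential: the target is
// Lap Az = -(4 pi / c) Jz, so we store -Lap Az - (4 pi / c) q n (viz - vez).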
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 FourPiovercJz = FOURPI_Q_OVER_C*p_n_minor[iMinor].n*(p_vie[iMinor].viz - p_vie[iMinor].vez);
p_residual[iMinor] = -pLapAz[iMinor] - FourPiovercJz;
}
__global__ void kernelAccelerate_v_from_advection
(
f64 const h_use,
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_k, // multiply by old mass ..
f64 * __restrict__ p_AreaMinor_k,
nvals * __restrict__ p_n_plus, // divide by new mass ..
f64 * __restrict__ p_AreaMinor_plus,
v4 * __restrict__ p_vie_k,
f64_vec3 * __restrict__ p_v_n_k,
f64_vec3 * __restrict__ p_MAR_neut, // these contain the mom flux due to advection.
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// outputs:
v4 * __restrict__ p_vie_dest,
f64_vec3 * __restrict__ p_v_n_dest)
{
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[iMinor];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
{
v4 vie_k = p_vie_k[iMinor];
f64_vec3 v_n_k = p_v_n_k[iMinor];
nvals n_k = p_n_k[iMinor];
f64 AreaMinor_k = p_AreaMinor_k[iMinor];
nvals n_dest = p_n_plus[iMinor];
f64 AreaMinor_plus = p_AreaMinor_plus[iMinor];
f64 Nk = n_k.n*AreaMinor_k;
f64 Nnk = n_k.n_n*AreaMinor_k;
f64 Nplus = n_dest.n*AreaMinor_plus;
f64 Nnplus = n_dest.n_n*AreaMinor_plus;
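// Momentum N*v (with N = n*AreaMinor) is the conserved quantity, so the update is
// v^{k+1} = (N^k v^k + h * MAR) / N^{k+1}: old N weights the old velocity, new N converts back to velocity.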
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
f64_vec3 v_n;
v_n = (v_n_k*Nnk + h_use * MAR) / Nnplus;
// . We will really need to overview and see if this formula is correct -- did we already account for change in n?
// . Need to check if we would be double-counting to include * oldAreaMinor / newAreaMinor --- I think we counted it.
// The problem is that n's change is the change in N inferred in minor
// so we are preferring to assume that we use the change in N that
// would follow from looking at v on the boundaries of the minor.
// That is what we already rounded up in MAR.
if (TESTVNY2) printf("\n\n%d : v_n_k.y %1.10E v_n.y %1.12E h %1.8E MAR.y %1.10E N %1.10E \n",
iMinor, v_n_k.y, v_n.y, h_use, MAR.y, Nnplus);
if (TESTVNX) printf("\n\n%d : v_n_k.x %1.10E v_n.x %1.12E h %1.8E MAR.x %1.10E N %1.10E \n",
iMinor, v_n_k.x, v_n.x, h_use, MAR.x, Nnplus);
v4 vie;
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
//vie.vxy = (vie_k.vxy * n_k.n + h_use * (m_i*MAR.xypart() / ((m_i + m_e)*AreaMinor))) / n_dest.n;
//vie.viz = (vie_k.viz * n_k.n + h_use * MAR.z / (AreaMinor)) / n_dest.n;
// We would PREFER the N_k / N_k+1 version, however
vie.vxy = (vie_k.vxy*Nk + h_use * m_i*MAR.xypart() / (m_i + m_e))/Nplus;
vie.viz = (vie_k.viz*Nk + h_use * MAR.z)/Nplus;
if (TEST_ACCEL_Y) printf("iMinor %d vie_k.vxy.y %1.8E Nk %1.9E Nplus %1.9E nk nplus %1.9E %1.9E \n"
"AreaMinor k plus %1.9E %1.9E intermediate vxy %1.9E MAR_ion %1.9E h_use %1.10E \n",
iMinor, vie_k.vxy.y, Nk, Nplus, n_k.n, n_dest.n, AreaMinor_k, AreaMinor_plus, vie.vxy.y, MAR.y, h_use);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
vie.vxy += h_use * (m_e*MAR.xypart() / ((m_i + m_e)*Nplus));
vie.vez = (vie_k.vez*Nk + h_use * MAR.z) / Nplus;
// 22/11/20 FIX FOR NOW: If wind is blowing outwards, do not increase velocity in CROSSING_INS just due to density decreasing.
// Density loss is loss of momentum at same rate so leave velocity unchanged.
// (We have prevented momentum traffic to vertex minors!)
if ((info.flag == CROSSING_INS) && (vie.vxy.dot(info.pos) > 0.0)) vie = vie_k;
if ((info.flag == CROSSING_INS) && (v_n.dot(info.pos) > 0.0)) v_n = v_n_k;
// Bit messed up, but we've got to try something.
if (TEST_ACCEL_Y) printf("MAR_e %1.9E vxy.y %1.9E \n", MAR.y, vie.vxy.y);
if (TEST_ACCEL_EZ) printf("\n%d vie.vez %1.10E vie_k.vez %1.10E Nk %1.9E Nplus %1.9E oldNvez %1.8E \n"
"Nratio %1.8E h*MAR.z %1.8E Areaminor k %1.9E plus %1.9E ; \n",
iMinor, vie.vez, vie_k.vez, Nk, Nplus, Nk*vie_k.vez, Nplus / Nk, h_use*MAR.z,
AreaMinor_k, AreaMinor_plus);
memcpy(&(p_vie_dest[iMinor]), &vie, sizeof(v4));
p_v_n_dest[iMinor] = v_n;
} else {
if (info.flag == OUTERMOST) {
memcpy(&(p_vie_dest[iMinor]), &(p_vie_k[iMinor]), sizeof(v4));
memcpy(&(p_v_n_dest[iMinor]), &(p_v_n_k[iMinor]), sizeof(f64_vec3));
} else {
memset(&(p_vie_dest[iMinor]), 0, sizeof(v4));
memset(&(p_v_n_dest[iMinor]), 0, sizeof(f64_vec3));
};
}
}
__global__ void kernelPopulateBackwardOhmsLaw(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ ROCAzdotduetoAdvection,
// Now going to need to go through and see this set 0 or sensible every time.
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
//AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave)
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, ROCAzdot_antiadvect, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x;
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n));
// p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y / (AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
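// Note: unlike kernelPopulateBackwardOhmsLaw_noadvect above, no MAR.z contribution is added to vn0.z
// here before MAR is overwritten with the ion momentum rates.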
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (((TESTTRI))) printf("\nGPU %d a:MAR_e %1.10E %1.10E z %1.10E MAR.y %1.10E Area %1.10E\n", iMinor,
h_use * (m_e*MAR.x / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.z,
MAR.y,
AreaMinor);
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
};
}
denom = 1.0 + h_use * M_e_over_en* (cross_section_times_thermal_en*n_use.n)
+ h_use*M_i_over_in* (cross_section_times_thermal_in*n_use.n);
vn0 /= denom; // It is now the REDUCED value
ohm.beta_ne = h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
// SOON GET RID OF THIS CRAP:
ROCAzdot_antiadvect = ROCAzdotduetoAdvection[iMinor]; // assign the variable declared above rather than shadowing it
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
v0.vxy +=
(h_use / ((m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(vn0.xypart());
denom = 1.0 + (h_use / (m_i + m_e))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*(1.0 - ohm.beta_ne - ohm.beta_ni);
v0.vxy /= denom;
ohm.beta_xy_z = (h_use * q / (c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x]; // coeff on viz-vez
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// if (nu_ei_effective != nu_ei_effective) printf("nu_ei NaN: omega %1.8E %1.8E nu_eHeart %1.8E nu_eiBar %1.8E\n",
// omega[threadIdx.x].x, omega[threadIdx.x].y, nu_eHeart, nu_eiBar);
AAdot AAzdot_k = p_AAdot_src[iMinor];
v0.viz +=
-h_use*qoverMc*(AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect + h_use * c*c*LapAz)
- h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x]);
// Still omega_ce . Check formulas.
v0.viz +=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
+ h_use *moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * moverM*nu_ei_effective) / denom;
v0.vez +=
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz))
+ h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x]);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
+ h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
// could store this from above and put opposite -- dividing by m_e instead of m_i
// overdue..?
v0.vez += h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vn0.z + ohm.beta_ni * v0.viz)
+ h_use*nu_ei_effective*v0.viz;
// implies:
effect_of_viz0_on_vez0 +=
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective;
denom = 1.0 + (h_use*h_use*4.0*M_PI*q*eoverm*n_use.n
+ h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ h_use*nu_ei_effective*(1.0 - beta_ie_z);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*4.0*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use *qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
}
else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
// Think maybe we should get rid of most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
}
else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
// if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
// {
// p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
// }
// else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
// p_AAdot_intermediate[iMinor] = temp; //
// };
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission, if iPass == 0 was passed, is to save off Iz0 and Sigma_zz.
// First pass set Ez_strength = 0.0.
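// Host-side sketch of how these per-block partials are presumably combined (an assumption about
// the calling code, not taken from this file): since Iz_total(Ez) = sum_blocks Iz0 + Ez_strength * sum_blocks sigma_zz,
// the caller can choose
//   Ez_strength = (Iz_prescribed - sum(p_Iz0)) / sum(p_sigma_zz)
// after the first (Ez_strength = 0) pass, then apply it on the second pass.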
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sigma_zz[threadIdx.x] += sigma_zz[s - 1];
Iz[threadIdx.x] += Iz[s - 1];
};
// In case s == 81 (k == 40), thread 39 adds in element [80];
// otherwise the pairwise folds only reach 39+40 = 79 and [80] would be dropped.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
// Wish to make the Jz contribs to Azdot on each side of the ins exactly equal in L1,
// meant making this long routine even longer with collecting Iz_k.
}
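// The block-wide shared-memory sum above (and the copies of it further down) is written out inline
// each time. A reference-only sketch of the same pattern, factored as a helper -- the helper name is
// ours and nothing in this file calls it. For odd s the live leftover element is [s-1], which thread
// k-1 folds in before s is halved.
__device__ void BlockSumSketch_f64(f64 * shared_vals)
{
	int s = blockDim.x;
	int k = s / 2;
	while (s != 1) {
		if (threadIdx.x < k)
			shared_vals[threadIdx.x] += shared_vals[threadIdx.x + k];
		__syncthreads();
		// Handle blockDim not a power of 2: element [s-1] was not folded by the threads above.
		if ((s % 2 == 1) && (threadIdx.x == k - 1))
			shared_vals[threadIdx.x] += shared_vals[s - 1];
		s = k;
		k = s / 2;
		__syncthreads();
	};
	// shared_vals[0] now holds the block total.
}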
/*__global__ void Estimate_Effect_on_Integral_Azdot_from_Jz_and_LapAz(
f64 hstep,
structural * __restrict__ p_info,
nvals * __restrict__ p_nvals_k,
nvals * __restrict__ p_nvals_use,
v4 * __restrict__ p_vie_k,
v4 * __restrict__ p_vie_kplus1,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz,
AAdot * __restrict__ p_Azdot,
f64 * __restrict__ p_tile1, // +ve Jz
f64 * __restrict__ p_tile2, // -ve Jz
f64 * __restrict__ p_tile3, // LapAz
f64 * __restrict__ p_tile4, // integrate Azdot diff
f64 * __restrict__ p_tile5,
f64 * __restrict__ p_tile6
)
{
__shared__ f64 sum1[threadsPerTileMinor];
__shared__ f64 sum2[threadsPerTileMinor];
__shared__ f64 sum3[threadsPerTileMinor];
__shared__ f64 sum4[threadsPerTileMinor];
__shared__ f64 sum5[threadsPerTileMinor];
__shared__ f64 sum6[threadsPerTileMinor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info[iMinor];
nvals n_k = p_nvals_k[iMinor];
nvals n_use = p_nvals_use[iMinor];
v4 v_k = p_vie_k[iMinor];
v4 v_kplus1 = p_vie_kplus1[iMinor];
f64 AreaMinor = p_AreaMinor[iMinor];
f64 LapAz = p_LapAz[iMinor];
sum1[threadIdx.x] = 0.0;
sum2[threadIdx.x] = 0.0;
sum3[threadIdx.x] = 0.0;
sum4[threadIdx.x] = 0.0;
sum5[threadIdx.x] = 0.0;
sum6[threadIdx.x] = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
sum1[threadIdx.x] =
hstep*c*c*0.5*FOURPI_OVER_C * q*n_k.n*(v_k.viz - v_k.vez)*AreaMinor
+ hstep*c*0.5*FOUR_PI*q*n_use.n*(v_kplus1.viz - v_kplus1.vez)*AreaMinor;
// Was n used consistently?
} else {
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
sum2[threadIdx.x] = hstep*c*4.0*M_PI*negative_Iz_per_triangle;
}
// make sure we copy from the code:
sum3[threadIdx.x] = hstep*c*c*LapAz*AreaMinor;
sum4[threadIdx.x] = fabs(hstep*c*c*LapAz*AreaMinor);
sum5[threadIdx.x] = p_Azdot[iMinor].Azdot * AreaMinor;
sum6[threadIdx.x] = fabs(p_Azdot[iMinor].Azdot * AreaMinor);
// -----------------------------------------------------------------------------
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum1[threadIdx.x] += sum1[threadIdx.x + k];
sum2[threadIdx.x] += sum2[threadIdx.x + k];
sum3[threadIdx.x] += sum3[threadIdx.x + k];
sum4[threadIdx.x] += sum4[threadIdx.x + k];
sum5[threadIdx.x] += sum5[threadIdx.x + k];
sum6[threadIdx.x] += sum6[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum1[threadIdx.x] += sum1[threadIdx.x + s - 1];
sum2[threadIdx.x] += sum2[threadIdx.x + s - 1];
sum3[threadIdx.x] += sum3[threadIdx.x + s - 1];
sum4[threadIdx.x] += sum4[threadIdx.x + s - 1];
sum5[threadIdx.x] += sum5[threadIdx.x + s - 1];
sum6[threadIdx.x] += sum6[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_tile1[blockIdx.x] = sum1[0];
p_tile2[blockIdx.x] = sum2[0];
p_tile3[blockIdx.x] = sum3[0];
p_tile4[blockIdx.x] = sum4[0];
p_tile5[blockIdx.x] = sum5[0];
p_tile6[blockIdx.x] = sum6[0];
}
}*/
__global__ void kernelCalculateVelocityAndAzdot(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
f64 * __restrict__ p_ROCAzdotantiadvect,
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
temp.Azdot += h_use*(c*c*p_LapAz[iMinor] + p_ROCAzdotantiadvect[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
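// In equation form, the reconstruction above applies the Ohm coefficients with the chosen Ez_strength:
//   v_ez = v_ez0 + sigma_e_zz*Ez,   v_iz = v_iz0 + sigma_i_zz*Ez,
//   v_xy = v_xy0 + beta_xy_z*(v_iz - v_ez),
//   v_n_xy = v_n0_xy + (beta_ne + beta_ni)*v_xy,   v_nz = v_n0z + beta_ne*v_ez + beta_ni*v_iz.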
if (info.flag == CROSSING_INS) {
f64_vec2 rhat = info.pos / info.pos.modulus();
v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
v.vxy -= v.vxy.dot(rhat)*rhat;
}
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
if (info.flag == OUTERMOST) {
temp.Azdot = 0.0;
temp.Az = 0.0;
}
else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for C_INS too
}
if ((TESTTRI2)) printf(
"CVAA iMinor %d v0.vez %1.9E sigma_e_zz %1.9E Ez %1.9E v.vez %1.9E\n",
iMinor, v0.vez, ohm.sigma_e_zz, Ez_strength, v.vez);
}
else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}
__global__ void kernelCalculateVelocityAndAzdot_noadvect(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out )
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
// debug:
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
if (TESTACCEL) printf("iVertex %d v0.xy %1.9E %1.9E\n",
iVertex, v0.vxy.x, v0.vxy.y);
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
if (TESTACCEL) printf("iVertex %d ohm.beta_xz yz %1.9E %1.9E viz %1.9E vez %1.9E effect xy %1.9E %1.9E\n",
iVertex,
ohm.beta_xy_z.x, ohm.beta_xy_z.y,
v.viz,
v.vez,
ohm.beta_xy_z.x * (v.viz - v.vez),
ohm.beta_xy_z.y * (v.viz - v.vez));
//if (TESTACCEL) printf("iVertex %d ohm.beta_yz %1.9E viz %1.9E vez %1.9E effect %1.9E\n",
// iVertex,
// ohm.beta_xy_z.y,
// v.viz,
// v.vez,
// ohm.beta_xy_z.y * (v.viz - v.vez));
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (TESTVNY) printf("%d v_n.y %1.9E since ohm %1.9E v.vxy.y %1.9E \n", iMinor, v_n.y,
(ohm.beta_ne + ohm.beta_ni), v.vxy.y);
if (TESTVNX) printf("%d v_n.x %1.9E since ohm %1.9E v.vxy.x %1.9E \n", iMinor, v_n.x,
(ohm.beta_ne + ohm.beta_ni), v.vxy.x);
// if (info.flag == CROSSING_INS) {
// f64_vec2 rhat = info.pos / info.pos.modulus();
// v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
// v.vxy -= v.vxy.dot(rhat)*rhat;
//
// if (TESTACCEL) printf("v.vxy after negate r component : %1.9E %1.9E\n", v.vxy.x, v.vxy.y);
// }
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
// if (info.flag == OUTERMOST) {
// temp.Azdot = 0.0;
// temp.Az = 0.0; // really!
// } else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
// }
if (TESTACCEL) printf("CVAA:iVertex %d v_out.xy %1.9E %1.9E\n", iVertex, v.vxy.x, v.vxy.y);
if (TESTVEZ) printf("%d CVAA vez %1.11E v0 %1.11E Ez_strength %1.14E sigma %1.14E \n"
"Azdot %1.9E components: k %1.9E h_use*(c*c*p_LapAz) %1.9E hc4piJ %1.9E\n"
"n viz vez %1.14E %1.14E %1.14E\n"
, iMinor, v.vez, v0.vez,
Ez_strength, ohm.sigma_e_zz, temp.Azdot, p_AAzdot_src[iMinor].Azdot, h_use*(c*c*p_LapAz[iMinor]),
h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez),
n_use.n, v.viz, v.vez
);
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}
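// A minimal launch sketch (illustrative only -- the grid-count name below is an assumption, not
// taken from this file). One thread handles one minor cell:
//   kernelCalculateVelocityAndAzdot_noadvect <<< numTilesMinor, threadsPerTileMinor >>> (
//       hsub, p_info_minor, p_vn0, p_v0, p_OhmsCoeffs, p_AAdot_src, p_n_minor, p_AreaMinor,
//       p_LapAz, p_AAdot_out, p_vie_out, p_vn_out);
//   cudaDeviceSynchronize();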
/*
__global__ void kernelCalculateVelocityAndAzdot_noadvect__debugintegrate(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out,
f64 * __restrict__ p_integ_Jz1,
f64 * __restrict__ p_integ_Jz2,
f64 * __restrict__ p_integ_LapAz
)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ f64 sum_Jzdomain[threadsPerTileMinor];
__shared__ f64 sum_Jzreverse[threadsPerTileMinor];
__shared__ f64 sum_LapAz[threadsPerTileMinor];
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
sum_Jzdomain[threadIdx.x] = 0.0;
sum_Jzreverse[threadIdx.x] = 0.0;
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
sum_LapAz[threadIdx.x] = c*c*p_LapAz[iMinor] * p_AreaMinor[iMinor];
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
if (TESTACCEL) printf("iVertex %d v0.y %1.9E\n", iVertex, v0.vxy.y);
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
if (TESTACCEL) printf("iVertex %d ohm.beta_yz %1.9E viz %1.9E vez %1.9E effect %1.9E\n",
iVertex,
ohm.beta_xy_z.y,
v.viz,
v.vez,
ohm.beta_xy_z * (v.viz - v.vez));
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (TESTVNY) printf("%d v_n.y %1.9E since ohm %1.9E v.vxy.y %1.9E \n", iMinor, v_n.y,
(ohm.beta_ne + ohm.beta_ni), v.vxy.y);
//
// if (info.flag == CROSSING_INS) {
// f64_vec2 rhat = info.pos / info.pos.modulus();
// v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
// v.vxy -= v.vxy.dot(rhat)*rhat;
// }
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
if (TESTACCEL) printf("CVAA:iVertex %d v_out.y %1.9E\n", iVertex, v.vxy.y);
if (TESTVEZ) printf("%d CVAA vez %1.9E v0 %1.9E Ez %1.9E sigma %1.9E\n", iMinor, v.vez, v0.vez,
Ez_strength, ohm.sigma_e_zz);
sum_Jzdomain[threadIdx.x] = c*FOUR_PI*q*n_use.n*(v.viz - v.vez)*p_AreaMinor[iMinor];
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
sum_Jzreverse[threadIdx.x] = negative_Iz_per_triangle*c*FOUR_PI;
};
}
p_AAzdot_out[iMinor] = temp;
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum_Jzdomain[threadIdx.x] += sum_Jzdomain[threadIdx.x + k];
sum_LapAz[threadIdx.x] += sum_LapAz[threadIdx.x + k];
sum_Jzreverse[threadIdx.x] += sum_Jzreverse[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum_Jzdomain[threadIdx.x] += sum_Jzdomain[threadIdx.x + s - 1];
sum_LapAz[threadIdx.x] += sum_LapAz[threadIdx.x + s - 1];
sum_Jzreverse[threadIdx.x] += sum_Jzreverse[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integ_Jz1[blockIdx.x] = sum_Jzdomain[0];
p_integ_Jz2[blockIdx.x] = sum_Jzreverse[0];
p_integ_LapAz[blockIdx.x] = sum_LapAz[0];
}
}*/
/*
__global__ void kernelCalculateVelocityAndAzdot_noadvect_SPIT(
f64 h_use,
structural * p_info_minor,
LONG3 * p_tricornerindex,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
bool bReport = false;
if (iMinor < BEGINNING_OF_CENTRAL) {
LONG3 tci = p_tricornerindex[iMinor];
if ((tci.i1 == VERTCHOSEN) || (tci.i2 == VERTCHOSEN) || (tci.i3 == VERTCHOSEN))
bReport = true;
}
else {
if (iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN) bReport = true;
}
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
f64 store_Azdot = temp.Azdot;
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
f64 store_hccLap = h_use*c*c*p_LapAz[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (info.flag == CROSSING_INS) {
f64_vec2 rhat = info.pos / info.pos.modulus();
v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
v.vxy -= v.vxy.dot(rhat)*rhat;
}
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
if (info.flag == OUTERMOST) {
temp.Azdot = 0.0;
temp.Az = 0.0; // really!
}
else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
}
if (bReport) printf("%d Azdot_old %1.10E new %1.10E Az %1.10E hccLapAz %1.10E hc4piJz %1.10E n %1.8E vez %1.8E\n", iMinor, store_Azdot, temp.Azdot, temp.Az,
store_hccLap, h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez), n_use.n, v.vez);
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}*/
__global__ void kernelCreateEpsilonAndJacobi_Heat
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__coeffself_n, // what about dividing by N?
f64 * __restrict__ p__coeffself_i,
f64 * __restrict__ p__coeffself_e,
f64 * __restrict__ p__epsilon_n,
f64 * __restrict__ p__epsilon_i,
f64 * __restrict__ p__epsilon_e,
f64 * __restrict__ p__Jacobi_n,
f64 * __restrict__ p__Jacobi_i,
f64 * __restrict__ p__Jacobi_e ,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
// So this is a lot like saying, let's call the actual routine...
// except we also want Jacobi which means we also want coeff on self in epsilon.
// eps= T_putative - (T_k +- h sum kappa dot grad T_putative)
// coeff on self we want to be linearized so it incorporates the assumption that it affects kappa.
// deps/dT = sum [[dkappa/dT = 0.5 kappa/T] dot grad T + kappa dot d/dT grad T]
// However this means if we know kappa dot grad T then we can * by 0.5/T to get dkappa/dT part
// But we had to collect a separate value for kappa dot d/dT grad T.
// We certainly need to somehow modify the existing kappa dot grad T routine here.
// what about dividing by N?
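// In LaTeX form, the weighted residual built below for a species s (with N_s = n_s * AreaMajor) is
//   \epsilon_s = \sqrt{N_s}\,\Big[ T_s^{putative} - \big( T_{k,s} + \tfrac{h_{sub}}{N_s}\,\tfrac{d(N_s T_s)}{dt}\big|_{diffusive} \big) \Big],
// and the Jacobi regressor divides the h_sub-scaled rate term by the stored coeff-on-self.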
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if (bUseMask) {
// if (iVertex == VERTCHOSEN) {
// printf("%d : bUseMask %d p_bMaskblock[blockIdx.x] %d blockIdx.x %d\n",
// iVertex, (bUseMask) ? 1 : 0, (p_bMaskblock[blockIdx.x]) ? 1 : 0, blockIdx.x);
// }
if (p_bMaskblock[blockIdx.x] == 0) return;
}
bool bMask[3];
if (bUseMask) {
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES*2];
// if (iVertex == VERTCHOSEN) {
// printf("%d : bUseMask %d p_bMask[2] %d \n",
// iVertex, (bUseMask) ? 1 : 0, (bMask[2]) ? 1 : 0);
// }
// We need to re-do into species anyway. Afterwards.
if ((bMask[0] == 0) &&
(bMask[1] == 0) &&
(bMask[2] == 0)) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 N = n.n*Area;
f64 Nn = n.n_n*Area;
f64 Tn, Ti, Te, actual_Tn, actual_Ti, actual_Te, epsilon_n, epsilon_i, epsilon_e;
T3 T_k;
memcpy(&T_k, &(p_Tk[iVertex]), sizeof(T3));
if ((bUseMask == 0) || (bMask[0])) {
Tn = p_T_n[iVertex];
actual_Tn = T_k.Tn + (h_sub / Nn)*Rates.NnTn;
epsilon_n = Tn - actual_Tn;
// Try this:
p__Jacobi_n[iVertex] = -(h_sub / sqrt(Nn))*Rates.NnTn / p__coeffself_n[iVertex]; // should never be 0
epsilon_n *= sqrt(Nn);
p__epsilon_n[iVertex] = epsilon_n;
} else {
epsilon_n = 0.0;
actual_Tn = 0.0; // not used
}
if ((bUseMask == 0) || (bMask[1])) {
Ti = p_T_i[iVertex];
actual_Ti = T_k.Ti + (h_sub / N)*Rates.NiTi;
epsilon_i = Ti - actual_Ti;
// Try this:
p__Jacobi_i[iVertex] = -(h_sub / sqrt(N))*Rates.NiTi / p__coeffself_i[iVertex];
// Weighted Least Squares:
epsilon_i *= sqrt(N);
p__epsilon_i[iVertex] = epsilon_i;
} else {
epsilon_i = 0.0;
actual_Ti = 0.0; // not used
};
if ((bUseMask == 0) || (bMask[2])) {
Te = p_T_e[iVertex];
actual_Te = T_k.Te + (h_sub / N)*Rates.NeTe;
epsilon_e = Te - actual_Te;
// Try this:
p__Jacobi_e[iVertex] = -(h_sub / sqrt(N))*Rates.NeTe / p__coeffself_e[iVertex];
epsilon_e *= sqrt(N);
p__epsilon_e[iVertex] = epsilon_e;
//if ((iVertex == VERTCHOSEN))
// printf("iVertex %d Te %1.10E actual_Te %1.9E Tk %1.9E Rates %1.10E epsilon %1.11E Jacobi %1.10E\n",
// iVertex, Te, actual_Te, T_k.Te, Rates.NeTe, epsilon_e, p__Jacobi_e[iVertex]);
} else {
epsilon_e = 0.0;
actual_Te = 0.0; // not used
}
// If sqrt N we care about is 1e4 and T we care about is 1e-14 then we get 1e-10 as the sqrt(N)T to add to create absolute threshold
if (p_bFailedTest != 0) {
if ((epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Tn*actual_Tn*Nn + 1.0e-10*1.0e-10))
||
(epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Ti*actual_Ti*N + 1.0e-10*1.0e-10))
||
(epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Te*actual_Te*N + 1.0e-10*1.0e-10))
)
p_bFailedTest[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
}
//if (p_bFailedTest != 0) {
// if ((epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Tn*actual_Tn*Nn + 1.0e-30)) ||
// (epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Ti*actual_Ti*N + 1.0e-30)) ||
// (epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Te*actual_Te*N + 1.0e-30)) ||
// (actual_Tn < 0.0) || (actual_Ti < 0.0) || (actual_Te < 0.0))
// p_bFailedTest[blockIdx.x] = true;
//}
// It may be T<0 that is the probs, given that we have arbitrary strength of B-pull on some edge.
// 1e-28 = 1e-14 1e-14 so that's small. Up to 1e-22 = 1e-9 1e-14.
// 1e-8 T (so 1e-16 TT) is comparatively quite large -- just past single precision.
// That seems about right for now.
} else {
p__epsilon_n[iVertex] = 0.0;
p__epsilon_i[iVertex] = 0.0;
p__epsilon_e[iVertex] = 0.0;
p__Jacobi_n[iVertex] = 0.0;
p__Jacobi_i[iVertex] = 0.0;
p__Jacobi_e[iVertex] = 0.0;
};
}
__global__ void kernelMultiplyVector(
f64 * __restrict__ p_multiply,
f64 const factor)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
p_multiply[iVertex] *= factor;
}
__global__ void kernelCreateEpsilonAndJacobi_Heat_1species
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T,
f64 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__coeffself,
f64 * __restrict__ p__epsilon,
f64 * __restrict__ p__Jacobi,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask,
bool * __restrict__ p_bMaskblock,
bool bUseMask,
int species,
bool bIncorporateEps
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
// So this is a lot like saying, let's call the actual routine...
// except we also want Jacobi which means we also want coeff on self in epsilon.
// eps= T_putative - (T_k +- h sum kappa dot grad T_putative)
// coeff on self we want to be linearized so it incorporates the assumption that it affects kappa.
// deps/dT = sum [[dkappa/dT = 0.5 kappa/T] dot grad T + kappa dot d/dT grad T]
// However this means if we know kappa dot grad T then we can * by 0.5/T to get dkappa/dT part
// But we had to collect a separate value for kappa dot d/dT grad T.
// We certainly need to somehow modify the existing kappa dot grad T routine here.
// what about dividing by N?
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if (TESTHEAT) printf("%d bUseMask %d info.flag %d \n",
iVertex, (bUseMask ? 1 : 0), p_info_major[iVertex].flag);
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
if (bUseMask) {
if (p_bMask[iVertex] == 0) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 T, actual_T, epsilon;
f64 T_k = p_Tk[iVertex];
f64 N = (species != 0)?(n.n*Area) : (n.n_n*Area);
T = p_T[iVertex];
if (species == 0) actual_T = T_k + (h_sub / N)*Rates.NnTn;
if (species == 1) actual_T = T_k + (h_sub / N)*Rates.NiTi;
if (species == 2) actual_T = T_k + (h_sub / N)*Rates.NeTe;
epsilon = T - actual_T;
p__epsilon[iVertex] = epsilon;
if (bIncorporateEps) {
p__Jacobi[iVertex] = -epsilon / p__coeffself[iVertex]; // should never be 0 // match the other function for a minute
} else {
p__Jacobi[iVertex] = -actual_T;
// Try just doing Richardson beyond the 1st regressor.
}
if (TESTHEAT) printf("%d : T %1.10E T_k %1.10E epsilon %1.10E d/dt NiTi %1.10E hsub/N %1.10E coeffself %1.10E Jacobi %1.10E \n",
iVertex, T, T_k, epsilon, Rates.NiTi, h_sub/N, p__coeffself[iVertex], p__Jacobi[iVertex]);
if (p_bFailedTest != 0) {
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T + 1.0e-14*1.0e-14))
p_bFailedTest[blockIdx.x] = true;
// Absolute floor: no sqrt(N) weighting is applied to epsilon in this kernel, so the minimum
// temperature we care about is taken as 1.0e-14 erg (hence the 1.0e-14^2 term), rather than
// the 1.0e-10 used where epsilon carries a factor of root N.
}
} else {
p__epsilon[iVertex] = 0.0;
p__Jacobi[iVertex] = 0.0;
};
}
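// Note on the regressor emitted above (restating the two branches): with bIncorporateEps it is the
// usual Jacobi direction -epsilon / coeffself; otherwise -actual_T is emitted, per the
// "Richardson beyond the 1st regressor" comment. Downstream use is presumably a multi-regressor solve.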
__global__ void kernelCreateEpsilonHeat_1species
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T,
f64 * __restrict__ p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__epsilon,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask,
bool * __restrict__ p_bMaskblock,
bool bUseMask,
int species
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
if (bUseMask) {
if (p_bMask[iVertex] == 0) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 T, actual_T, epsilon;
f64 T_k = p_Tk[iVertex];
f64 N = (species != 0) ? (n.n*Area) : (n.n_n*Area);
T = p_T[iVertex];
if (species == 0) actual_T = T_k + (h_sub / N)*Rates.NnTn;
if (species == 1) actual_T = T_k + (h_sub / N)*Rates.NiTi;
if (species == 2) actual_T = T_k + (h_sub / N)*Rates.NeTe;
#if SQRTNT
epsilon = sqrt(N)*(T - actual_T);
#if TESTHEAT
if (iVertex == VERTCHOSEN)
printf("%d epsilon %1.14E sqrtN %1.10E T %1.12E T_k %1.12E hsub/N %1.12E dbydt{NnTn} %1.12E\n",
iVertex, epsilon, sqrt(N), T, T_k, h_sub / N, Rates.NnTn);
#endif // TESTHEAT
#else
epsilon = T - actual_T;
#endif
p__epsilon[iVertex] = epsilon;
if (p_bFailedTest != 0) {
//if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T*N + 1.0e-10*1.0e-10))
// p_bFailedTest[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
// 2021 YEP self, I agree. But it's 1e-10 per 1e-6s.
#if SQRTNT
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k)*(T-T_k)
+ h_sub*4.0e-4*h_sub*4.0e-4 // absolute deviation from trajectory
+ 1.0e-25*N*T*T_k // floating point error allowance
// Note -- if the last term is negative because T<0 then this almost guarantees fail test.
) {
p_bFailedTest[blockIdx.x] = true;
};
#if TESTHEAT
//if (iVertex == VERTCHOSEN) {
// printf("iVertex %d epsilonsq %1.14E RELPPN %1.9E rel threshold %1.14E\n"
// "absolute threshold %1.14E Floating-point threshold %1.14E threshold total %1.14E \n",
// iVertex, epsilon*epsilon, REL_THRESHOLD_HEAT, REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k),
// h_sub*4.0e-4*h_sub*4.0e-4, // absolute deviation from trajectory
// 1.0e-25*N*T*T_k,
// REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k)
// + h_sub*4.0e-4*h_sub*4.0e-4 // absolute deviation from trajectory
// + 1.0e-25*N*T*T_k
// );
//};
#endif
#else
// NO ROOT N INVOLVED:
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T + 4.0e-14*4.0e-14))
p_bFailedTest[blockIdx.x] = true;
#endif
}
}
else {
p__epsilon[iVertex] = 0.0;
};
}
__global__ void kernelDivideBySqrtN(
f64 * __restrict__ p_regr,
nvals * __restrict__ p_n,
f64 * __restrict__ p_Area,
int const iSpecies
)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
nvals nnn = p_n[iVertex];
f64 n;
if (iSpecies == 0) {
n = nnn.n_n;
} else {
n = nnn.n;
};
if (n > 0.0) {
f64 N = n*p_Area[iVertex];
p_regr[iVertex] /= sqrt(N);
}
}
__global__ void AggregateSmashMatrix(
f64 * __restrict__ p_Jacobianesque_list,
f64 * __restrict__ p_eps,
f64 * __restrict__ p_smash_matrix_block,
f64 * __restrict__ p_smash_vector_block
) {
__shared__ f64 smash_collect[SQUASH_POINTS*threadsPerTileMajor];
__shared__ f64 Jacobian_data[SQUASH_POINTS*threadsPerTileMajor];
// 1. Load in d eps/ d beta
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
memcpy(&(Jacobian_data[threadIdx.x*SQUASH_POINTS]),
&(p_Jacobianesque_list[iMinor*SQUASH_POINTS]), sizeof(f64)*SQUASH_POINTS);
// 2. Loop:
int j;
#pragma unroll
for (int i = 0; i < SQUASH_POINTS; i++)
{
f64 use = Jacobian_data[threadIdx.x*SQUASH_POINTS + i];
for (j = 0; j < SQUASH_POINTS; j++)
smash_collect[threadIdx.x*SQUASH_POINTS + j] = use*Jacobian_data[threadIdx.x*SQUASH_POINTS + j];
// Now add up row vectors we got:
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
#pragma unroll
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(threadIdx.x + k)*SQUASH_POINTS + y];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(s - 1)*SQUASH_POINTS + y];
};
// In case s == 81 (k == 40), thread 39 folds in row [80];
// otherwise the pairwise folds only reach rows 0..79 and row [80] would be dropped.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
memcpy(&(p_smash_matrix_block[blockIdx.x*SQUASH_POINTS*SQUASH_POINTS + i*SQUASH_POINTS]),
smash_collect, sizeof(f64)*SQUASH_POINTS);
};
// And eps vs deps?
f64 epsilon = p_eps[iMinor];
for (j = 0; j < SQUASH_POINTS; j++)
smash_collect[threadIdx.x*SQUASH_POINTS + j] = epsilon*Jacobian_data[threadIdx.x*SQUASH_POINTS + j];
// Now add up row vectors we got:
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
#pragma unroll
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(threadIdx.x + k)*SQUASH_POINTS + y];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(s - 1)*SQUASH_POINTS + y];
};
// In case s == 81 (k == 40), thread 39 folds in row [80];
// otherwise the pairwise folds only reach rows 0..79 and row [80] would be dropped.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
memcpy(&(p_smash_vector_block[blockIdx.x*SQUASH_POINTS]), smash_collect, sizeof(f64)*SQUASH_POINTS);
}
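// What AggregateSmashMatrix leaves behind, presumably for a host-side (or follow-up kernel) solve
// (this is our reading of the code above, not something stated in this file): each block contributes
// a partial J^T J matrix (p_smash_matrix_block, SQUASH_POINTS x SQUASH_POINTS) and a partial J^T eps
// vector (p_smash_vector_block), where J holds d(eps)/d(beta) per point. Summing these over blocks
// and solving the SQUASH_POINTS x SQUASH_POINTS system gives the least-squares coefficients beta.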
__global__ void kernelCreateEpsilon_Heat_for_Jacobi
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
bool bMask[3];
if (bUseMask) {
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES * 2];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (iVertex == VERTCHOSEN) printf("%d : bMask[2] %d info.flag %d \n",
iVertex, (bUseMask && bMask[2]) ? 1 : 0, info.flag); // bMask is only populated when bUseMask
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
T3 T_k;
f64 Tn, Ti, Te;
memcpy(&T_k, &(p_Tk[iVertex]), sizeof(T3));
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 N = n.n*Area;
f64 Nn = n.n_n*Area;
if ((bUseMask == 0) || (bMask[2])) {
Te = p_T_e[iVertex];
f64 actual_Te = T_k.Te + (h_sub / N)*Rates.NeTe;
f64 epsilon_e = Te - actual_Te;
epsilon_e *= sqrt(N);
p_eps_e[iVertex] = epsilon_e;
// if ((iVertex == VERTCHOSEN)) printf("%d : Te %1.9E actual %1.9E Tk %1.9E rates %1.9E epsilon %1.11E \n",
// iVertex,
// Te, actual_Te, T_k.Te, Rates.NeTe, epsilon_e);
};
if ((bUseMask == 0) || (bMask[0])) {
Tn = p_T_n[iVertex];
f64 actual_Tn = T_k.Tn + (h_sub / Nn)*Rates.NnTn;
f64 epsilon_n = Tn - actual_Tn;
epsilon_n *= sqrt(Nn);
p_eps_n[iVertex] = epsilon_n;
};
if ((bUseMask == 0) || (bMask[1])) {
Ti = p_T_i[iVertex];
f64 actual_Ti = T_k.Ti + (h_sub / N)*Rates.NiTi;
f64 epsilon_i = Ti - actual_Ti;
// Weighted Least Squares:
epsilon_i *= sqrt(N);
p_eps_i[iVertex] = epsilon_i;
// if (iVertex == VERTCHOSEN)
// printf("iVertex %d actual_Ti %1.9E Tk %1.9E Rates %1.10E epsilon %1.10E\n",
// iVertex, actual_Ti, T_k.Ti, Rates.NiTi, epsilon_i);
};
};
// NOT NEEDED: ENSURE WE SET EPS TO 0 FIRST INSTEAD.
// else p_eps_n[iVertex] = 0.0;
// p_eps_i[iVertex] = 0.0;
//p_eps_e[iVertex] = 0.0;
}
__global__ void kernelCreateEpsilonAndJacobi(
f64 const h_use,
structural * __restrict__ p_info,
f64 * __restrict__ p_Az_array_next,
f64 * __restrict__ p_Az_array,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_Lap_Aznext,
f64 * __restrict__ p_epsilon,
f64 * __restrict__ p_Jacobi_x,
bool * __restrict__ p_bFail)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 eps;
structural info = p_info[iMinor];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
eps = 0.0; // p_Lap_Aznext[iMinor];
p_Jacobi_x[iMinor] = 0.0; // -eps / p_LapCoeffSelf[iMinor];
// if (iMinor == 0) printf("\nGPU: eps[0] %1.14E LapCoeffself %1.14E \n", eps, p_LapCoeffSelf[iMinor]);
// but we reset it in ResetFrills called for regressor
}
else {
#ifdef MIDPT_A_AND_ACTUALLY_MIDPT_A_NOT_JUST_EFFECT_ON_AZDOT
// WE COULD CHOOSE to leave it so that Az advances with Azdot_k+1 : we don't know a reason why not.
eps = p_Az_array_next[iMinor] - p_Az_array[iMinor]
- h_use * p_gamma[iMinor] * p_Lap_Aznext[iMinor]
- h_use * p_Azdot0[iMinor];
p_Jacobi_x[iMinor] = -eps / (1.0 - h_use * p_gamma[iMinor] * p_LapCoeffSelf[iMinor]);
#else
f64 Aznext = p_Az_array_next[iMinor];
f64 gamma = p_gamma[iMinor];
eps = Aznext - p_Az_array[iMinor] - h_use * gamma * p_Lap_Aznext[iMinor] - h_use*p_Azdot0[iMinor];
// if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) {
// printf("iMinor %d eps %1.9E Aznext %1.9E Azk? %1.9E h_use %1.9E gamma %1.9E LapAz %1.9E Azdot0 %1.9E\n",
// iMinor, eps, Aznext, p_Az_array[iMinor], h_use, gamma, p_Lap_Aznext[iMinor], p_Azdot0[iMinor]);
// }
p_Jacobi_x[iMinor] = -eps / (1.0 - h_use * gamma * p_LapCoeffSelf[iMinor]);
if (p_Jacobi_x[iMinor] != p_Jacobi_x[iMinor]) printf("p_Jacobi_x[%d] was NaN : eps %1.9E gamma %1.9E LCS %1.9E LapAznext %1.9E Azdot0 %1.9E Aznext %1.9E\n",
iMinor, eps, gamma, p_LapCoeffSelf[iMinor], p_Lap_Aznext[iMinor], p_Azdot0[iMinor], Aznext);
// if (iMinor == 32641) printf("32641: eps %1.9E Az %1.12E Azk %1.12E h %1.10E gamma %1.10E LapAz %1.12E "
// "h Azdot0 %1.10E\n",
// eps, p_Az_array_next[iMinor], p_Az_array[iMinor],
// h_use,gamma,
// p_Lap_Aznext[iMinor],
// h_use*p_Azdot0[iMinor]);
#endif
// if (iMinor == 25526) printf("\n\n########\nJacobi_x 25526 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// if (iMinor == 86412) printf("Jacobi_x 86412 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// if (iMinor == 69531) printf("Jacobi_x 69531 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// Typical value for Az is like 100+ so use 0.1 as minimum that we care about, times relthresh.
if (eps*eps > RELTHRESH_AZ_d*RELTHRESH_AZ_d*(Aznext*Aznext + 10.0*10.0)) p_bFail[blockIdx.x] = true;
// This does not seem to be triggering.
};
p_epsilon[iMinor] = eps;
}
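// For reference (restating the non-MIDPT branch above): the residual and preconditioned step are
//   eps      = Az_next - Az_k - h*gamma*Lap(Az_next) - h*Azdot0,
//   Jacobi_x = -eps / (1 - h*gamma*LapCoeffSelf),
// i.e. a Jacobi step for the backward equation Az_{k+1} = Az_k + h*(Azdot0 + gamma*Lap Az_{k+1}),
// with frills zeroed here and reset by ResetFrills applied to the regressor.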
__global__ void kernelCreateExplicitStepAz(
f64 const hsub,
f64 * __restrict__ pAzdot0,
f64 * __restrict__ pgamma,
f64 * __restrict__ pLapAz, // we based this off of half-time Az.
f64 * __restrict__ p_result) // = h (Azdot0 + gamma*LapAz)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
p_result[iMinor] = hsub*(pAzdot0[iMinor] + pgamma[iMinor] * pLapAz[iMinor]);
}
__global__ void kernelCreateEpsilon_Az_CG(
f64 const h_use,
structural * __restrict__ p_info,
f64 * __restrict__ p_Az_plus,
f64 * __restrict__ p_Az_k,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_Lap_Az,
f64 * __restrict__ p_epsilon,
f64 * __restrict__ p__sqrtfactor,
bool * __restrict__ p_bFail,
bool const bSaveFail)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 eps;
structural info = p_info[iMinor];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
eps = 0.0; // p_Lap_Aznext[iMinor];
}
else {
// WE COULD CHOOSE to leave it so that Az advances with Azdot_k+1 : we don't know a reason why not.
f64 sqrtfactor = p__sqrtfactor[iMinor];
f64 one_over_sqrt;
if (sqrtfactor != 0.0) {
one_over_sqrt = 1.0 / sqrtfactor;
} else {
one_over_sqrt = 1.0;
};
f64 Aznext = p_Az_plus[iMinor];
eps = one_over_sqrt*(Aznext - p_Az_k[iMinor] - h_use * p_Azdot0[iMinor])
- sqrtfactor * p_Lap_Az[iMinor]; // notice this is the integrated Lap //
// eps^2 sqrtfactor sqrtfactor = original eps squared.
if (bSaveFail)
if (eps*eps*sqrtfactor*sqrtfactor > 1.0e-10*1.0e-10*(Aznext*Aznext + 1.0*1.0))
p_bFail[blockIdx.x] = true;
// An optimization is probably to store values in shared then amalgamate, send data to global on 1 thread. ?
if (eps != eps) printf("iMinor %d eps %1.9E Aznext %1.9E gamma %1.9E sqrtfactor %1.9E over %1.9E info.flag %d LapAz %1.9E Azdot0 %1.9E\n",
iMinor, eps, Aznext, p_gamma[iMinor], sqrtfactor, one_over_sqrt, info.flag, p_Lap_Az[iMinor], p_Azdot0[iMinor]);
};
p_epsilon[iMinor] = eps;
}
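// Note on the weighting above (restating the code): with w = p__sqrtfactor, the stored residual is
//   eps_w = (Az_plus - Az_k - h_use*Azdot0)/w - w*Lap(Az_plus),
// so eps_w*w is the residual on the original scale ("eps^2 sqrtfactor sqrtfactor = original eps
// squared"), which is why the failure test compares eps*eps*sqrtfactor*sqrtfactor to the threshold.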
__global__ void kernelSetZero(
f64 * __restrict__ data
) {
long const index = blockDim.x*blockIdx.x + threadIdx.x;
data[index] = 0.0;
}
__global__ void kernelCreate_further_regressor(
structural * __restrict__ p_info,
f64 h_use,
f64 * __restrict__ p_regressor,
f64 * __restrict__ p_Lap_regressor,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_regressor2)
{
long const index = blockDim.x*blockIdx.x + threadIdx.x;
/*
f64 d_eps_by_d_beta;
structural info = p_info[index];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
d_eps_by_d_beta = 0.0; // Lap_Jacobi[iMinor]; // try ignoring
// Need to fill this in afterwards by ResetFrills?
}
else {
d_eps_by_d_beta = (p_regressor[index] - h_use * p_gamma[index] * p_Lap_regressor[index]);
};
p_regressor2[index] = d_eps_by_d_beta / (1.0 - h_use * p_gamma[index] * p_LapCoeffSelf[index]);*/
// Try just this instead:
p_regressor2[index] = p_gamma[index] * p_Lap_regressor[index]; // d_eps_by_d_beta / (1.0 - h_use * p_gamma[index] * p_LapCoeffSelf[index]);
}
__global__ void kernelGetLap_minor(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos, integ_grad_Az;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 store_centroid = opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
if (TESTLAP) printf("vertex %d endpt0 %1.9E %1.9E projendpt0 %1.9E %1.9E \n",
iVertex, endpt0.x, endpt0.y, projendpt0.x, projendpt0.y);
if (TESTLAP) printf("%d Innermost: AreaMinor += %1.10E AreaMinor %1.10E \n",
iVertex, (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x, AreaMinor);
};
// if (info.flag == OUTERMOST) {
// printf("DEBUG: iVertex %d info.neigh_len %d iend %d izTri[0] %d izTri[iend-1] %d izTri[iend-2] %d "
// "flags 0 %d 1 %d 2 %d 3 %d 4 %d 5 %d\n"
// "positions 01234 (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// iend, izTri[0], izTri[iend - 1],
// izTri[iend - 2],
// p_info[izTri[0]].flag, p_info[izTri[1]].flag, p_info[izTri[2]].flag,
// p_info[izTri[3]].flag, p_info[izTri[4]].flag,
// p_info[izTri[0]].pos.x, p_info[izTri[0]].pos.y, p_info[izTri[1]].pos.x, p_info[izTri[1]].pos.y,
// p_info[izTri[2]].pos.x, p_info[izTri[2]].pos.y, p_info[izTri[3]].pos.x, p_info[izTri[3]].pos.y,
// p_info[izTri[4]].pos.x, p_info[izTri[4]].pos.y
// );
//
// if (DIRICHLET == false) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// f64 radius = info.pos.modulus();
// endpt0.project_to_radius(projendpt0,
// 0.5*(FRILL_CENTROID_OUTER_RADIUS_d + radius)); // back of cell for Lap purposes
// // flatten the cell to get wall halfway out to 0 line.
// }
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// We should always call ResetFrillsAz first on the argument, so that if next is a frill then
// we got the correct value as nextAz.
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Indicates we think 1 is anticlockwise from 0. For OUTERMOST, it's pointing IN so the rest must do too, then we divide out the minus.
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//if (TESTLAP) printf("vertex %d endpt0 %1.9E %1.9E endpt1 %1.9E %1.9E Area += %1.10E edge_normal.x %1.9E\n",
// iVertex, endpt0.x, endpt0.y, endpt1.x, endpt1.y,
// (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x, edge_normal.x);
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E prevAz %1.8E nextAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E Area_quad %1.8E\n",
iVertex, i, izTri[i],
ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral,
area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
iprev = i;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
if (TESTLAP) printf("vertex %d endpt1 %1.9E %1.9E projendpt1 %1.9E %1.9E \n",
iVertex, endpt1.x, endpt1.y, projendpt1.x, projendpt1.y);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTLAP) printf("vertex %d Innermost: AreaMinor += %1.10E \n",
iVertex, (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x);
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
if (TESTLAP) printf(" vertex %d Innermost: AreaMinor += %1.10E AreaMinor %1.10E \n",
iVertex, (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x,
AreaMinor);
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
oppAz = 0.0;
nextAz = 0.0;
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
oppAz = prevAz*(prevpos.modulus() / opppos.modulus());
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
};
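// (Both extrapolations above assume Az falls off as 1/r beyond the last data point:
// Az(r2) = Az(r1)*(r1/r2), which is the RADIALDECLINE boundary condition.)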
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// NOW WE ARE GOING TO LOOK OUTWARDS
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
nextAz = 0.0;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE) {
//This was incorrect
nextAz = p_Az[izTri[0]]*(store_centroid.modulus()/nextpos.modulus());
}
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc.
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
nextAz = p_Az[izTri[0]];
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
// But this points up why CG doesn't work properly here: the presence of the AreaMinor factor
// makes the equations non-symmetric.
if (TESTLAP) printf("LapAz_integ %1.10E AreaMinor %1.10E LapAz %1.10E \n", Our_integral_Lap_Az, AreaMinor,
Our_integral_Lap_Az / AreaMinor);
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
// p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
prevAz = ourAz*(info.pos.modulus() / prevpos.modulus());
}
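// (The outer frill is detected here by radius rather than by flag, presumably because neighbour
// flags are not held in shared memory; under RADIALDECLINE its value is replaced by the 1/r
// extrapolation of our own Az.)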
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.999999*0.999999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This check shouldn't be necessary anyway, and is especially unsuitable when the BC is not meant to be flat.
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET) ||
(RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
{
// neighbour's not a frill, or it's Dirichlet or radial decline looking outwards.
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if ((TESTLAP2) || (Our_integral_Lap_Az != Our_integral_Lap_Az)) {
printf("iMinor %d [i] %d ourAz %1.9E theirs %1.9E prev %1.9E next %1.9E numer %1.9E contrib %1.10E areaquad %1.8E\n",
iMinor, izNeighMinor[i], ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal),
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
area_quadrilateral);
};
}
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
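// (Same x dy area rule as in the vertex loop: the implicit wall endpoints are the centroids
// THIRD*(prevpos + info.pos + opppos) and THIRD*(nextpos + info.pos + opppos), so the mean x is
// the SIXTH*(...) factor and edge_normal.x = THIRD*(nextpos.y - prevpos.y) supplies the dy.)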
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
// p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
}
__global__ void kernelGetLap_minor_SYMMETRIC(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor, // need to save off to multiply back for symmetry
bool const bDivideByArea
)
{
// Symmetric version with circumcenters to define corners of minor cells
// so as to use conjugate gradient for Az.
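// Intended use (an assumption drawn from the comments and the bDivideByArea flag, not a spec):
// call with bDivideByArea == false so the kernel returns the raw integral of Lap Az over each
// minor cell together with the cell area in p_AreaMinor; the integrated operator has the same
// edge coefficient seen from either cell, so it is symmetric and suitable for CG, with the area
// multiplied onto the right-hand side instead of dividing the operator.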
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos, integ_grad_Az;
f64_vec2 endpt0, endpt1;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
printf("don't call this routine unless the mess is reformed so that drawing triangles between these points will actually produce a Delaunay triangulation. Because if it doesn't, circumcenters aren't in triangles and it will be nonsense.\n");
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
// prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
// prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 store_centroid = opppos;
// endpt0 = THIRD * (info.pos + opppos + prevpos);
CalculateCircumcenter(&endpt0, info.pos, opppos, prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
// if (info.flag == OUTERMOST) {
// printf("DEBUG: iVertex %d info.neigh_len %d iend %d izTri[0] %d izTri[iend-1] %d izTri[iend-2] %d "
// "flags 0 %d 1 %d 2 %d 3 %d 4 %d 5 %d\n"
// "positions 01234 (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// iend, izTri[0], izTri[iend - 1],
// izTri[iend - 2],
// p_info[izTri[0]].flag, p_info[izTri[1]].flag, p_info[izTri[2]].flag,
// p_info[izTri[3]].flag, p_info[izTri[4]].flag,
// p_info[izTri[0]].pos.x, p_info[izTri[0]].pos.y, p_info[izTri[1]].pos.x, p_info[izTri[1]].pos.y,
// p_info[izTri[2]].pos.x, p_info[izTri[2]].pos.y, p_info[izTri[3]].pos.x, p_info[izTri[3]].pos.y,
// p_info[izTri[4]].pos.x, p_info[izTri[4]].pos.y
// );
//
// if (DIRICHLET == false) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// f64 radius = info.pos.modulus();
// endpt0.project_to_radius(projendpt0,
// 0.5*(FRILL_CENTROID_OUTER_RADIUS_d + radius)); // back of cell for Lap purposes
// // flatten the cell to get wall halfway out to 0 line.
// }
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Symmetric, with circumcenters:
// normal_gradient = (oppAz - ourAz) / ((opppos - info.pos).modulus());
// Our_integral_Lap_Az += normal_gradient*edge_normal.modulus();
// Reduce number of square roots:
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
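// Algebraically this is (oppAz - ourAz) * |edge_normal| / |opppos - info.pos| written with a
// single sqrt, since sqrt(n.n/d.d) = |n|/|d|. Provided both cells sharing a wall reconstruct the
// same wall length and the same centre-to-centre distance (which is the point of building walls
// from circumcenters), the coefficient is identical from either side, so the undivided integral
// operator is symmetric.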
if (Our_integral_Lap_Az != Our_integral_Lap_Az) printf("%d oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
iMinor, oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E delta_out %1.8E delta_edge %1.8E\n",
iVertex, i, izTri[i],
ourAz, oppAz,
(opppos - info.pos).modulus(),
edge_normal.modulus()
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
iprev = i;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
if (!RADIALDECLINE) {
oppAz = 0.0;
nextAz = 0.0;
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
}
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d-info.pos.modulus())*1.16 );
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
oppAz = prevAz*(prevpos.modulus() / opppos.modulus());
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
// nextpos directly above our own but only on a level with the other frill centroids
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (DIRICHLET || RADIALDECLINE)
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
// "map radially inwards so that radius is halfway out to the zero arc:"
// no can do... nor should we need to since the edge is equidistant from both points that generated it.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
nextAz = 0.0;
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE)
nextAz = p_Az[izTri[0]] * (store_centroid.modulus() / nextpos.modulus());
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
if (DIRICHLET || (RADIALDECLINE))
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
// nextAz = p_Az[izTri[0]]; // cancelled because nextAz is not used
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
if (DIRICHLET || (RADIALDECLINE))
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
if (Our_integral_Lap_Az != Our_integral_Lap_Az) printf(" at dirichlet oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
// But this points up why CG doesn't work properly on the divided form: the AreaMinor factor
// makes the equations non-symmetric.
if (bDivideByArea) Our_integral_Lap_Az /= AreaMinor;
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az; // divided by AreaMinor only if bDivideByArea
// When not dividing, we store the raw integral and save AreaMinor so the caller can multiply it back.
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
if (AreaMinor < 0.0) printf("iVertex %d : AreaMinor %1.10E \n", iVertex, AreaMinor);
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
} else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
// prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
// prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
}
CalculateCircumcenter(&endpt0, info.pos, opppos, prevpos);
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos);
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
//f64_vec2 integ_grad_Az;
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
//f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET) || (RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
{
// neighbour's not a frill, or it's Dirichlet looking outwards. Or radial decline.
// Symmetric, with circumcenters:
// normal_gradient = (oppAz - ourAz) / ((opppos - info.pos).modulus());
// Our_integral_Lap_Az += normal_gradient*edge_normal.modulus();
// Reduce number of square roots:
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
if (Our_integral_Lap_Az != Our_integral_Lap_Az)
printf("oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
// Is there a cunning way to get rid of sqrt. We know that edge_normal faces the same way as opppos-info.pos...
// thus, (opppos-info.pos).dot(edge_normal) = product of moduli. Hmmmm.
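// One way (a sketch only, not enabled): the wall endpoints are circumcentres of triangles that
// both contain info.pos and opppos, so they lie on the perpendicular bisector of that segment and
// edge_normal is parallel to d = opppos - info.pos. If it also points the same way, then
// |edge_normal|/|d| = edge_normal.dot(d)/d.dot(d) and the sqrt disappears:
// f64_vec2 d = opppos - info.pos;
// Our_integral_Lap_Az += (oppAz - ourAz) * edge_normal.dot(d) / d.dot(d);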
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//if (iMinor == 57364) printf("%d AreaMinor %1.8E contrib %1.8E, endpt0.x %1.9E endpt0.y %1.9E endpt1.x %1.9E endpt1.y %1.9E edge.x %1.8E info.pos %1.9E %1.9E oppos %1.9E %1.9E\n",
// iMinor, AreaMinor, (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x,
// endpt0.x, endpt0.y, endpt1.x, endpt1.y, edge_normal.x,
// info.pos.x, info.pos.y, opppos.x, opppos.y);
endpt0 = endpt1;
// prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
if (bDivideByArea) Our_integral_Lap_Az /= AreaMinor;
p_LapAz[iMinor] = Our_integral_Lap_Az; // divided by AreaMinor only if bDivideByArea
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep; careful what we pass in
if (AreaMinor < 0.0) printf("%d : AreaMinor %1.10E \n", iMinor, AreaMinor);
};
}
/*__global__ void kernelGetLap_minor__sum(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
} else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E\n"
"pos %1.9E %1.9E prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpost %1.9E %1.9E\n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
} else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d-info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
} else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
} else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d i %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n"
"ourpos %1.9E %1.9E prev %1.9E %1.9E out %1.9E %1.9E nex %1.9E %1.9E"
"PBC %d \n",
iMinor, i, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
szPBC[i]);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
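// A minimal sketch of an alternative treatment of a non-power-of-two s (an assumption, shown for
// one of the four arrays only and kept purely as commented reference): fold the orphan top
// element into [0] before each halving step.
// for (int s2 = blockDim.x; s2 > 1; s2 = s2 / 2) {
// if ((s2 % 2 == 1) && (threadIdx.x == 0)) integralLapAz[0] += integralLapAz[s2 - 1];
// __syncthreads();
// if (threadIdx.x < s2 / 2) integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s2 / 2];
// __syncthreads();
// }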
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}
*/
/*
__global__ void kernelGetLap_minor__sum_placecontribs(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT,
f64 * __restrict__ p_contriblist
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long izneighminorneigh[6];
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
memcpy(izneighminorneigh, p_izNeighMinor + 6 * izTri[i], sizeof(long) * 6);
int j = 0;
while ((j < 6) && (izneighminorneigh[j] != iVertex + BEGINNING_OF_CENTRAL)) j++;
if (j == 6) {
printf("ERROR ERROR ERROR %d %d \n", iVertex, izTri[i]);
}
else {
p_contriblist[izTri[i] * 6 + j] = integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E \n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
}
else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n",
iMinor, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}*/
/*
__global__ void kernelGetLap_minor__sum_detectcontribs(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT,
f64 * __restrict__ p_contriblist
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long izneighminorneigh[6];
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E \n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 contrib = integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 alleged = p_contriblist[iMinor * 6 + i];
f64 sum = (alleged + contrib);
if (fabs(sum) > 1.0e-4) printf("%d from %d : contrib %1.14E alleged %1.14E sum %1.8E\n",
iMinor, izNeighMinor[i], contrib, alleged, sum);
}
else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n",
iMinor, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}*/
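// Reference sketch (added for clarity; NOT called by any kernel in this file):
// the Jacobian kernel below repeatedly forms the derivative of one edge's
// Laplacian contribution, integ_grad_Az.dot(edge_normal)/area_quadrilateral,
// with respect to the four Az values at the corners of the quadrilateral
// (ours, prev, opp, next). The helper name and signature are illustrative only;
// it simply writes out once the four coefficient expressions that appear inline
// throughout kernelComputeJacobianValues and kernelGetLapCoeffs_and_min.
__device__ __forceinline__ void GetEdgeLapCoefficients(
	f64_vec2 ourpos, f64_vec2 prevpos, f64_vec2 opppos, f64_vec2 nextpos,
	f64_vec2 edge_normal, f64 area_quadrilateral,
	f64 * pd_self, f64 * pd_prev, f64 * pd_opp, f64 * pd_next)
{
	// d/d(ourAz) of integ_grad_Az.dot(edge_normal)/area_quadrilateral :
	*pd_self = 0.5*((prevpos.y - nextpos.y)*edge_normal.x
		- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
	// d/d(prevAz) :
	*pd_prev = 0.5*((opppos.y - ourpos.y)*edge_normal.x
		- (opppos.x - ourpos.x)*edge_normal.y) / area_quadrilateral;
	// d/d(oppAz) :
	*pd_opp = 0.5*((nextpos.y - prevpos.y)*edge_normal.x
		- (nextpos.x - prevpos.x)*edge_normal.y) / area_quadrilateral;
	// d/d(nextAz) :
	*pd_next = 0.5*((ourpos.y - opppos.y)*edge_normal.x
		+ (opppos.x - ourpos.x)*edge_normal.y) / area_quadrilateral;
}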
__global__ void kernelComputeJacobianValues(
structural * __restrict__ p_info,
// f64 * __restrict__ p_Aznext,
// f64 * __restrict__ p_Azk,
// f64 * __restrict__ pAzdot0,
f64 * __restrict__ pgamma,
f64 const h_use,
long * __restrict__ p_indic,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_Jacobianesque_list)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor]; // 16K for these - plentiful
// __shared__ f64 shared_Az[threadsPerTileMinor];
// __shared__ f64 shared_Az_verts[threadsPerTileMajor]; // 4.5 things
f64 d_eps_by_dbeta_j[SQUASH_POINTS]; // 24 max
// __shared__ f64 d_eps_by_dbeta_j_verts[SQUASH_POINTS*threadsPerTileMajor];
// need to acquire sums of products of these so need 1 for every tri and vertex
// ...
// would have been better off by far not trying to preserve data, but simply splitting into 2 routines.
// Maybe we should be accumulating in-between instead. Better.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
int iWhich, j;
int iWhichPrev, iWhichSelf, iWhichNext, iWhichOpp;
if (threadIdx.x < threadsPerTileMajor) {
iWhichSelf = p_indic[iVertex + BEGINNING_OF_CENTRAL];
memset(d_eps_by_dbeta_j, 0, sizeof(f64)*SQUASH_POINTS);
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
// ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
iWhichPrev = p_indic[izTri[iprev]];
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
iWhichOpp = p_indic[izTri[i]];
// Handle case that prev is a frill. What to do then?
// Not sure which way the numbers go, but either way, if prev is a frill then our 0th tri is the governor.
f64 prevfactor = 1.0;
f64 nextfactor;
if ((info.flag == INNERMOST) &&
(prevpos.dot(prevpos) < 1.0000001*1.0000001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
iWhichPrev = iWhichOpp;
};
if ((info.flag == OUTERMOST) && (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
iWhichPrev = iWhichOpp;
prevfactor = (opppos.modulus() / prevpos.modulus());
};
f64_vec2 store_centroid = opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
iend = tri_len - 2;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
iWhichNext = p_indic[izTri[inext]];
nextfactor = 1.0;
if ((info.flag == INNERMOST) &&
(nextpos.dot(nextpos) < 1.0000001*1.0000001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
iWhichNext = iWhichOpp;
};
if ((info.flag == OUTERMOST) &&
(nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
iWhichNext = iWhichOpp;
nextfactor = (opppos.modulus() / nextpos.modulus());
};
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev-1] += prevfactor*0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
// Accumulate the un-normalized coefficient here; -h*gamma/AreaMinor is applied at the end.
}
if (iWhichOpp > 0) {
d_eps_by_dbeta_j[iWhichOpp-1] += 0.5*((nextpos.y-prevpos.y)*edge_normal.x
- (nextpos.x-prevpos.x)*edge_normal.y) / area_quadrilateral;
// Accumulate the un-normalized coefficient here.
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext-1] += nextfactor*0.5*((info.pos.y-opppos.y)*edge_normal.x
+ (opppos.x-info.pos.x)*edge_normal.y) / area_quadrilateral;
// Accumulate the un-normalized coefficient here.
}
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += 0.5*((prevpos.y-nextpos.y)*edge_normal.x
- (prevpos.x-nextpos.x)*edge_normal.y) / area_quadrilateral;
// Accumulate the un-normalized coefficient here.
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
iWhichPrev = iWhichOpp;
iWhichOpp = iWhichNext;
prevfactor = 1.0; // inside the loop the new prev is a real tri (frills are excluded), so drop any frill scaling
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
f64 opp_prev = 0.0, next_ours = 0.0;
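// Under RADIALDECLINE the two out-of-mesh corners of this quadrilateral carry scaled
// copies of mesh values: the projected point (nextpos, below) holds next_ours*ourAz and
// the outer frill centroid (now sitting in opppos) holds opp_prev*prevAz, so their edge
// coefficients are folded onto iWhichSelf and iWhichPrev respectively.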
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
opp_prev = (prevpos.modulus() / opppos.modulus());
next_ours = (info.pos.modulus() / nextpos.modulus());
};
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
// iWhichPrev is already set to previous iWhichOpp.
// hold on to it
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1]
+= (0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y)
+ 0.5*opp_prev*(
(nextpos.y-prevpos.y)*edge_normal.x
-(nextpos.x-prevpos.x)*edge_normal.y
)
)/ area_quadrilateral;
}
//iWhichSelf = p_indic[iVertex + BEGINNING_OF_CENTRAL]; // already set.
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += (0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y)
+ 0.5*next_ours*(
(info.pos.y-opppos.y)*edge_normal.x
- (info.pos.x-opppos.x)*edge_normal.y
)
)/ area_quadrilateral;
}
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
// NOW WE ARE GOING TO LOOK OUTWARDS
// iprev IS NOT UPDATED
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
f64 next_0 = 0.0;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE) {
next_0 = (store_centroid.modulus() / nextpos.modulus());
}
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
//
// So now ... opp_prev is still there and must attribute the prev coefficient
// to the prev-1 index
//
// next_ours is now opp, and must attribute the opp coefficient to ourselves
//
// and the next coefficient now applies for index 0 with coeff next_0
// We have not updated iprev.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
// WE ARE NOW LOOKING DIRECTLY OUTWARDS.
// "iprev" is now the previous one to prev and still relevant in case of radial decline
// next_ours is now for the opposite one
// next_0 relates the next position to the effect of the 0th value.
if (RADIALDECLINE) {
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1] +=
0.5*opp_prev*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] +=
0.5*(1.0-next_ours)*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
iWhichNext = p_indic[izTri[0]];
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] +=
0.5*next_0*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
};
if (DIRICHLET) {
// May be nonsense.
iWhich = p_indic[izTri[iprev]]; //frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[izTri[inext]]; // frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[iVertex + BEGINNING_OF_CENTRAL];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (RADIALDECLINE) {
// prevAz = next_ours* self [iWhichSelf]
// oppAz = next_0 * 0th value [iWhichNext]
// nextAz = 0th value [iWhichNext]
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] +=
0.5*next_ours*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] +=
0.5*(next_0*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y)
+
(info.pos.y-opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y
) / area_quadrilateral;
}
}
if (DIRICHLET) {
iWhich = p_indic[izTri[i]]; // frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y) / area_quadrilateral;
}
iWhich = p_indic[izTri[inext]];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[iVertex + BEGINNING_OF_CENTRAL];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
f64 gamma = pgamma[iVertex + BEGINNING_OF_CENTRAL];
for (iWhich = 0; iWhich < SQUASH_POINTS; iWhich++)
d_eps_by_dbeta_j[iWhich] *= -h_use*gamma/AreaMinor;
// d eps_i / d x_j = [i==j]*1 - h gamma d[Lap here]/dx_j
if (p_indic[iVertex + BEGINNING_OF_CENTRAL] > 0) d_eps_by_dbeta_j[p_indic[iVertex + BEGINNING_OF_CENTRAL] - 1] += 1.0;
// p_indic[iVertex + BEGINNING_OF_CENTRAL]-1 is the number of its volley. Stupid system.
// For simplicity let's say we save off into global memory.
memcpy(&(p_Jacobianesque_list[(iVertex + BEGINNING_OF_CENTRAL)*SQUASH_POINTS]),
d_eps_by_dbeta_j, sizeof(f64)*SQUASH_POINTS); // d eps_i / dbeta_j
// if (iVertex + BEGINNING_OF_CENTRAL == MyMaxIndex) {
// for (j = 0; j < SQUASH_POINTS; j++)
// printf("%d : coeff %d : %1.9E \n", iVertex + BEGINNING_OF_CENTRAL, j,
// d_eps_by_dbeta_j[j]);
// }
}; // was thread in the first half of the block
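// Triangle (minor) half of the block: same edge-by-edge pattern as above. Where a
// neighbour lies beyond FRILL_CENTROID_OUTER_RADIUS_d under RADIALDECLINE, its Az is
// ourAz scaled by r_ours/r_neighbour, so that neighbour's coefficient is redirected to
// our own volley (iWhich* = iWhichSelf) together with the matching scale factor.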
memset(d_eps_by_dbeta_j, 0, sizeof(f64)*SQUASH_POINTS);
f64 prevfactor, oppfactor, nextfactor;
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) return;
iWhichSelf = p_indic[iMinor];
// p_LapAz[iMinor] = 0.0;
// } else {
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if ((prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill under radial decline:
//prevAz = ourAz*(info.pos.modulus() / prevpos.modulus());
// do this by resetting iWhichPrev and a factor. !!
iWhichPrev = iWhichSelf;
prevfactor = (info.pos.modulus() / prevpos.modulus());
} else {
iWhichPrev = p_indic[izNeighMinor[iprev]];
prevfactor = 1.0;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if ((opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill
//oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
iWhichOpp = iWhichSelf;
oppfactor = (info.pos.modulus() / opppos.modulus());
} else {
iWhichOpp = p_indic[izNeighMinor[i]];
oppfactor = 1.0;
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
} else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if ((nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill
//nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
iWhichNext = iWhichSelf;
nextfactor = (info.pos.modulus() / nextpos.modulus());
} else {
iWhichNext = p_indic[izNeighMinor[inext]];
nextfactor = 1.0;
};
// ______________________________________________________-
//f64_vec2 integ_grad_Az;
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
if (((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
|| (DIRICHLET) || (RADIALDECLINE))
&&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// A frill neighbour under RADIALDECLINE carries a scaled copy of ourAz; in that case the
// iWhich* index was pointed at ourselves above, and the matching factor (1.0 for
// ordinary neighbours) must multiply the coefficient.
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1] += prevfactor*0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
}
if (iWhichOpp > 0) {
d_eps_by_dbeta_j[iWhichOpp - 1] += oppfactor*0.5*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y) / area_quadrilateral;
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] += nextfactor*0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
}
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevpos = opppos;
opppos = nextpos;
iWhichPrev = iWhichOpp;
prevfactor = oppfactor;
iWhichOpp = iWhichNext;
oppfactor = nextfactor;
};
f64 gamma = pgamma[iMinor];
for (iWhich = 0; iWhich < SQUASH_POINTS; iWhich++)
d_eps_by_dbeta_j[iWhich] *= -h_use*gamma / AreaMinor;
// d eps_i / d x_j = [i==j]*1 - h gamma d[Lap here]/dx_j
//if (p_indic[iMinor] > SQUASH_POINTS) {
// printf("ERROR %d p_indic[iMinor] %d \n", iMinor, p_indic[iMinor]);
// // $$$$$$$$$$$
// // DEBUG
// // $$$$$$$$$$$
//} else {
if (iWhichSelf > 0) d_eps_by_dbeta_j[iWhichSelf - 1] += 1.0;
//}
// p_indic[iMinor]-1 is the number of its volley. Stupid system.
// For simplicity let's say we save off into global memory.
// if (p_indic[iMinor]>0) printf("indic %d found at %d; deps = %1.9E\n", p_indic[iMinor], iMinor,
// d_eps_by_dbeta_j[p_indic[iMinor] - 1]);
//
memcpy(&(p_Jacobianesque_list[iMinor*SQUASH_POINTS]),d_eps_by_dbeta_j,
sizeof(f64)*SQUASH_POINTS); // d eps_i / dbeta_j
// if (iMinor == MyMaxIndex) {
// for (j = 0; j < SQUASH_POINTS; j++)
// printf("%d : coeff %d : %1.9E \n", iMinor, j, d_eps_by_dbeta_j[j]);
// };
//};
}
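// Illustrative host-side sketch (an assumption about downstream use, not called from
// this file): the per-cell rows d eps_i/d beta_j stored in p_Jacobianesque_list are
// meant to be combined into sums of products over all minors. Assuming h_jacobian is
// a host copy of that array and nMinorTotal the number of rows (both names are
// hypothetical), the SQUASH_POINTS x SQUASH_POINTS matrix of sums would be:
void Accumulate_deps_products(f64 const * h_jacobian, long nMinorTotal,
	f64 sum_products[SQUASH_POINTS][SQUASH_POINTS])
{
	for (int j = 0; j < SQUASH_POINTS; j++)
		for (int k = 0; k < SQUASH_POINTS; k++)
			sum_products[j][k] = 0.0;
	for (long i = 0; i < nMinorTotal; i++) {
		f64 const * row = h_jacobian + i*SQUASH_POINTS; // d eps_i / d beta_j, j = 0..SQUASH_POINTS-1
		for (int j = 0; j < SQUASH_POINTS; j++)
			for (int k = 0; k < SQUASH_POINTS; k++)
				sum_products[j][k] += row[j]*row[k]; // sum over i of (d eps_i/d beta_j)(d eps_i/d beta_k)
	};
}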
__global__ void kernelGetLapCoeffs_and_min(
structural * __restrict__ p_info,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_min_array,
long * __restrict__ p_min_index)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ char shared_flag[threadsPerTileMinor];
__shared__ f64 mincoeffself[threadsPerTileMinor];
__shared__ long iMin[threadsPerTileMinor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
structural info = p_info[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_flag[threadIdx.x] = info.flag;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
mincoeffself[threadIdx.x] = 0.0;
iMin[threadIdx.x] = -1;
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
f64_vec2 store_first_point = endpt0;
short iend = tri_len;
f64_vec2 projendpt0;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(1.0)*(info.pos.y - nextpos.y)
+ (1.0)*(prevpos.y - info.pos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(info.pos.x - nextpos.x)
+ (1.0)*(prevpos.x - info.pos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN) {
// printf("%d contrib %1.14E %d \nourpos %1.14E %1.14E opppos %1.14E %1.14E \n"
// "prevpos nextpos %1.14E %1.14E %1.14E %1.14E\n"
// "szPBC[i] %d area_quadrilateral %1.14E \n",
// iVertex + BEGINNING_OF_CENTRAL,
// integ_grad_Az.dot(edge_normal) / area_quadrilateral,
// izTri[i],
// info.pos.x,info.pos.y,opppos.x,opppos.y,
// prevpos.x,prevpos.y,nextpos.x,nextpos.y,
// (int)szPBC[i],area_quadrilateral);
// }
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
++iprev;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
f64_vec2 integ_grad_Az;
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
f64 facc = 0.0;
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
facc = (info.pos.modulus() / nextpos.modulus());
}
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
+ (1.0)*(prevpos.y - nextpos.y)
+ facc*(info.pos.y-opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
+(1.0)*(prevpos.x - nextpos.x)
+ facc*(info.pos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc.
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(1.0)*(prevpos.y - nextpos.y)
+ facc*(nextpos.y-prevpos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(prevpos.x - nextpos.x)
+ facc*(nextpos.x-prevpos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
endpt1 = store_first_point;
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(1.0)*(prevpos.y - nextpos.y)
+ facc*(opppos.y - info.pos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(prevpos.x - nextpos.x)
+ facc*(opppos.x - info.pos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az_contrib_from_own_Az / AreaMinor;
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL];
iMin[threadIdx.x] = iVertex + BEGINNING_OF_CENTRAL;
// All vertices can count for this.
}; // was thread in the first half of the block
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Look at simulation.cpp
// Treatment of FRILLS :
p_LapCoeffSelf[iMinor] = -1.0;
// LapCoefftri[iMinor][3] = 1.0; // neighbour 0
}
else {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
f64 prevfac = 0.0, nextfac = 0.0, oppfac = 0.0;
short iprev = 5; short inext, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
prevfac = (info.pos.modulus() / prevpos.modulus());
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppfac = (info.pos.modulus() / opppos.modulus());
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextfac = (info.pos.modulus() / nextpos.modulus());
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
integ_grad_Az.x = 0.5*(
(1.0 + nextfac)*(info.pos.y - nextpos.y)
+ (prevfac + 1.0)*(prevpos.y - info.pos.y)
+ (oppfac + prevfac)*(opppos.y - prevpos.y)
+ (nextfac + oppfac)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0 + nextfac)*(info.pos.x - nextpos.x)
+ (prevfac + 1.0)*(prevpos.x - info.pos.x)
+ (oppfac + prevfac)*(opppos.x - prevpos.x)
+ (nextfac + oppfac)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET)
|| (RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
{
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevfac = oppfac;
oppfac = nextfac;
iprev = i;
// There is an even quicker way which is to rotate pointers. No memcpy needed.
};
p_LapCoeffSelf[iMinor] = Our_integral_Lap_Az_contrib_from_own_Az / AreaMinor;
if (p_LapCoeffSelf[iMinor] < mincoeffself[threadIdx.x])
{
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iMinor];
iMin[threadIdx.x] = iMinor;
};
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + k])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + k];
iMin[threadIdx.x] = iMin[threadIdx.x + k];
}
};
__syncthreads();
// Modify for the case that blockDim is not a power of 2: when s is odd the pairwise
// comparison above covers elements 0..2k-1 only, so let thread k-1 also compare
// against the leftover element s-1 (e.g. s == 81, k == 40: pairs cover 0..79 and
// thread 39 also takes element 80).
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
if (mincoeffself[threadIdx.x] > mincoeffself[s - 1])
{
mincoeffself[threadIdx.x] = mincoeffself[s - 1];
iMin[threadIdx.x] = iMin[s - 1];
}
};
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_min_array[blockIdx.x] = mincoeffself[threadIdx.x];
p_min_index[blockIdx.x] = iMin[threadIdx.x];
}
}
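// A minimal sketch (not part of this kernel) of how the per-block results written above might
// be combined after being copied back to the host. h_min_array, h_min_index and numBlocks are
// placeholder names for illustration only:
//
//   f64 overall_min = h_min_array[0];
//   long iWhich = h_min_index[0];
//   for (int iBlock = 1; iBlock < numBlocks; iBlock++) {
//     if (h_min_array[iBlock] < overall_min) {
//       overall_min = h_min_array[iBlock];
//       iWhich = h_min_index[iBlock];
//     };
//   };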
/*
__global__ void kernelGetLapCoeffs_and_min_DEBUG(
structural * __restrict__ p_info,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor, //B
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_min_array,
long * __restrict__ p_min_index) //B
{
Note many changes since this was used; delete and go again.
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ char shared_flag[threadsPerTileMinor];
__shared__ f64 mincoeffself[threadsPerTileMinor];
__shared__ long iMin[threadsPerTileMinor]; // B
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
// code A
structural info = p_info[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_flag[threadIdx.x] = info.flag;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
mincoeffself[threadIdx.x] = 0.0;
iMin[threadIdx.x] = -1;
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL];
iMin[threadIdx.x] = iVertex + BEGINNING_OF_CENTRAL; // B
// All vertices can count for this.
}; // was thread in the first half of the block
// 2nd commenting
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Look at simulation.cpp
// Treatment of FRILLS :
p_LapCoeffSelf[iMinor] = -1.0;
// LapCoefftri[iMinor][3] = 1.0; // neighbour 0
}
else {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
p_LapCoeffSelf[iMinor] = 0.0;
if (p_LapCoeffSelf[iMinor] < mincoeffself[threadIdx.x])
{
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iMinor];
iMin[threadIdx.x] = iMinor;
};
};
// still fails without the following.
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + k])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + k];
iMin[threadIdx.x] = iMin[threadIdx.x + k];
}
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + s - 1])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + s - 1];
iMin[threadIdx.x] = iMin[threadIdx.x + s - 1];
}
};
// Fix-up for odd s (e.g. s == 81): the pairwise step above only reaches [39] vs [79],
// so the orphaned element [80] is folded into [39] here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_min_array[blockIdx.x] = mincoeffself[threadIdx.x];
p_min_index[blockIdx.x] = iMin[threadIdx.x];
}
}*/
// Correct disposition of routines:
// --- union of T and [v + v_overall] -- uses n_shards --> pressure, momflux, grad Te
// --- union of T and [v + v_overall] -- uses n_n shards --> neutral pressure, neutral momflux
// --- Az,Azdot + v_overall -- runs for whole domain ---> Lap A, curl A, grad A, grad Adot, ROCAz, ROCAzdot
// ^^ base off of GetLap_minor.
// Worst case number of vars:
// (4+2)*1.5+6.5 <-- because we use v_vertex. + 3 for positions.
// What can we stick in L1? n_cent we could.
// We should be aiming a ratio 3:1 from shared:L1, if registers are small.
// For tris we are using n_shards from shared points.
// And it is for tris that we require vertex data v to be present.
// Idea: vertex code determines array of 12 relevant n and sticks them into shared.
// Only saved us 1 var. 9 + 6 + 3 = 18.
// Still there is premature optimization here -- none of this happens OFTEN.
// ever called?
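// Rough budget behind these per-thread double counts (assuming 48 KB of shared memory per
// block and threadsPerTileMinor = 256): 48*1024 / (256*8) = 24 doubles of shared per
// minor-thread slot, so even the 18-doubles-per-thread tally above still fits at 256 threads.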
/*
__global__ void kernelCreate_pressure_gradT_and_gradA_LapA_CurlA_minor(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_minor,
AAdot * __restrict__ p_AAdot,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just so we can handle insulator
f64_vec2 * __restrict__ p_GradTe,
f64_vec2 * __restrict__ p_GradAz,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ ROCAzduetoAdvection,
f64 * __restrict__ ROCAzdotduetoAdvection,
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_AreaMinor
)
{
// Getting this down to 8 vars we could have 512 threads (12 vars/thread total with vertex vars)
// Down to 6 -> 9 total -> 600+ threads
// Worry later.
__shared__ T2 shared_T[threadsPerTileMinor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Azdot[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
// Problem: we only have room for 1 at a time. Have to run again with n_n. Too bad.
// Live with it and push through.
// This applies to both vertices and triangles. And putting in L1 unshared is not better.
// We can imagine doing it some other way but using shards is true to the design that was created on CPU.
// Of course this means we'd be better off putting
// We could also argue that with shards for n_ion in memory we are better off doing an overwrite and doing stuff for nv also.
// never mind that for now
__shared__ T2 shared_T_verts[threadsPerTileMajor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 shared_Azdot_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// There is a good argument for splitting out A,Adot to a separate routine.
// That way we could have 10.5 => 585 ie 576 = 288*2 threads.
// Here we got (2+1+1+2)*1.5 = 9 , + 6.5 = 15.5 -> 384 minor threads max.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
{
AAdot temp = p_AAdot[iMinor];
shared_Az[threadIdx.x] = temp.Az;
shared_Azdot[threadIdx.x] = temp.Azdot;
}
{
T3 T_ = p_T_minor[iMinor];
shared_T[threadIdx.x].Te = T_.Te;
shared_T[threadIdx.x].Ti = T_.Ti;
}
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
AAdot temp = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
shared_Az_verts[threadIdx.x] = temp.Az;
shared_Azdot_verts[threadIdx.x] = temp.Azdot;
T3 T_ = p_T_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_T_verts[threadIdx.x].Te = T_.Te;
shared_T_verts[threadIdx.x].Ti = T_.Ti; // MOVED THIS OUT OF the following branch to see it match CPU
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
}
else {
// save several bus trips;
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
//shared_T_verts[threadIdx.x].Te = 0.0;
//shared_T_verts[threadIdx.x].Ti = 0.0;
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64 ourAzdot, oppAzdot, prevAzdot, nextAzdot;
f64_vec2 opppos, prevpos, nextpos;
T2 oppT, prevT, nextT;
//nvals our_n, opp_n, prev_n, next_n;
f64_vec2 Our_integral_curl_Az, Our_integral_grad_Az, Our_integral_grad_Azdot, Our_integral_grad_Te;
f64 Our_integral_Lap_Az;
if (threadIdx.x < threadsPerTileMajor) {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Azdot.x = 0.0;
Our_integral_grad_Azdot.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64_vec3 MAR_ion, MAR_elec;
memcpy(&(MAR_ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(MAR_elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourAz = shared_Az_verts[threadIdx.x];
ourAzdot = shared_Azdot_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_T[izTri[iprev] - StartMinor];
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevAzdot = shared_Azdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_T[izTri[i] - StartMinor];
oppAz = shared_Az[izTri[i] - StartMinor];
oppAzdot = shared_Azdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
T3 opp_T = p_T_minor[izTri[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_T[izTri[inext] - StartMinor];
nextAz = shared_Az[izTri[inext] - StartMinor];
nextAzdot = shared_Azdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
T2 T0, T1;
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// So this is pretty stupid ---
// If shardmodel went for flat then we have decided that there is no pressure gradient affecting v here.
// Mind you we didn't expect it to be flat nearly as often as it is flat.
// Think carefully about what pressure we want to feel.
// It makes a kind of sense if you have a cliff of density then you feel it in the triangle in between.
// But that won't push points apart. It just sends stuff through the wall.
// It's a shame we can't just use actual n values to infer gradient over a region.
// It probably creates wobbles in v as well, because if we move fast particles at edge then we leave
// Behind a still-lower v in the vertex-centered minor.
// The scheme is kind of skewiffifying.
// Assume neighs 0,1 are relevant to border with tri 0 minor
// To get integral grad we add the averages along the edges times edge_normals
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// if (iVertex == VERT1) {
// printf("GPUpressure %d MAR_ion.x %1.12E contrib.x %1.12E n0 %1.12E Ti0 %1.9E n1 %1.9E Ti1 %1.9E edge_normal.x %1.12E \n",
// CHOSEN, MAR_ion.x,
// -0.5*(n0*T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.x,
// n0, T0.Ti, n1, T1.Ti, edge_normal.x);
// }
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN)
// printf("GPU %d : GradTe contrib %1.14E %1.14E Te %1.14E opp %1.14E next %1.14E prev %1.14E edge_normal %1.14E %1.14E\n", iVertex + BEGINNING_OF_CENTRAL,
// 0.5*(T0.Te + T1.Te) * edge_normal.x,
//0.5*(T0.Te + T1.Te) * edge_normal.y,
// shared_T_verts[threadIdx.x].Te, oppT.Te, nextT.Te, prevT.Te,
//edge_normal.x, edge_normal.y);
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az += Az_edge * (endpt1 - endpt0);
// Missing a factor of 3 possibly?
// ??????????????????????????????????????????????????????????????
// if (Az_edge != Az_edge)
// printf("GPU vert %d Az_edge %1.14E oppAz %1.14E endpt1 %1.14E %1.14E Integ_curl %1.14E %1.14E\n",
// iVertex, Az_edge, oppAz, endpt1.x,endpt1.y, Our_integral_curl_Az.x, Our_integral_curl_Az.y
// );
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
oppT = nextT;
}; // next i
//if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// // This will never happen because we just asked info.flag == DOMAIN_VERTEX !!
// // Now add on the final sides to give area:
// // 3 4
// // 2 1 0
// // endpt0=endpt1 is now the point north of edge facing 2 anyway.
// f64_vec2 projendpt1;
// if (info.flag == OUTERMOST) {
// endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
// }
// else {
// endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
// };
// edge_normal.x = projendpt1.y - endpt1.y;
// edge_normal.y = endpt1.x - projendpt1.x;
// AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
// edge_normal.x = projendpt0.y - projendpt1.y;
// edge_normal.y = projendpt1.x - projendpt0.x;
// AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// // line between out-projected points
//};
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor;
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Te / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN) printf("Our_integral_grad_Te.x %1.14E AreaMinor %1.14E\n\n",
// Our_integral_grad_Te.x, AreaMinor);
// wow :
f64_vec2 overall_v_ours = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
ROCAzduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = overall_v_ours.dot(Our_integral_grad_Az / AreaMinor);
ROCAzdotduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = overall_v_ours.dot(Our_integral_grad_Azdot / AreaMinor);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &MAR_elec, sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do Az, Azdot only:
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevAzdot = shared_Azdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izTri[iprev]].pos;
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
oppAzdot = shared_Azdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1;
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextAzdot = shared_Azdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64_vec2 integ_grad_Az;
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// To get integral grad we add the averages along the edges times edge_normals
// f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
// f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
// Our_integral_grad_Azdot += Azdot_edge * edge_normal;
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
};
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor; // 0,0
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT); // 0,0, BZ
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
ROCAzduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
ROCAzdotduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Vector2(0.0, 0.0);
}; // // was it domain vertex or Az-only
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// T2 prevT, nextT, oppT;
//f64 prevAz, nextAz, oppAz, ourAz;
//f64 prevAzdot, nextAzdot, oppAzdot, ourAzdot;
f64_vec3 MAR_ion, MAR_elec;
// this is not a clever way of doing it. Want more careful.
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
if ((izNeighMinor[3] >= StartMinor) && (izNeighMinor[3] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[3] - StartMinor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[3]];
oppAz = temp.Az;
};
p_LapAz[iMinor] = oppAz - ourAz;
ROCAzduetoAdvection[iMinor] = 0.0;
ROCAzdotduetoAdvection[iMinor] = 0.0;
p_GradAz[iMinor] = Vector2(0.0, 0.0);
memset(&(p_B[iMinor]), 0, sizeof(f64_vec3));
p_GradTe[iMinor] = Vector2(0.0, 0.0);
p_AreaMinor[iMinor] = 1.0e-12;
memset(&(p_MAR_ion[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_MAR_elec[iMinor]), 0, sizeof(f64_vec3));
}
else {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Azdot.x = 0.0;
Our_integral_grad_Azdot.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64 AreaMinor_for_A = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(&MAR_ion, p_MAR_ion + iMinor, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iMinor, sizeof(f64_vec3));
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevT = shared_T[izNeighMinor[iprev] - StartMinor];
prevAzdot = shared_Azdot[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevAzdot = shared_Azdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_T_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 prev_T = p_T_minor[izNeighMinor[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppT = shared_T[izNeighMinor[i] - StartMinor];
oppAzdot = shared_Azdot[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppAzdot = shared_Azdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_T_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
// indexminor sequence:
// 0 = corner 0
// 1 = neighbour 2
// 2 = corner 1
// 3 = neighbour 0
// 4 = corner 2
// 5 = neighbour 1
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
// Pathological case: OUTERMOST vertex where neigh_len is not correct to take as == tri_len
// !
// ///////////////////////////////////////////////////////////////////////////////////////////
// [0] is on our clockwise side rel to [1]. That means it is anticlockwise for the vertex.
// That means we interpolate with the value from next tri around.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
//This matches a diagram:
//
// 2---(4)----(3)---1 = corner 1 = indexminor 2: (2,3)
// \ / \ /
// \/ \ /
// (5\ (2/ indexminor 1 = neighbour 2: (1,2)
// \ /
// \0)--(1/
// \ _/
// 0 = corner 0 = indexminor0
};
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextT = shared_T[izNeighMinor[inext] - StartMinor];
nextAzdot = shared_Azdot[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextAzdot = shared_Azdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_T_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
T3 next_T = p_T_minor[izNeighMinor[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
next_T = p_T_minor[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]; // stray reload: applies a shared-memory offset to a global array and the value is never used
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0); // looks anticlockwise
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
//if ((i % 2 == 0) || ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// We have to not muck around with prevpos because here it's being used for A.
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor_for_A += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
T3 T0, T1; // waste of registers
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti); // assumes point is at simple average of tri and vert centres.
n0 = n_array[i];
n1 = n_array[inext]; // !
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// typical edge
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// endpt0 = THIRD * (prevpos + info.pos + opppos);
// endpt1 = THIRD * (nextpos + info.pos + opppos);
// edge_normal.x = endpt1.y - endpt0.y;
// edge_normal.y = endpt0.x - endpt1.x;
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER-r2) / (r1-r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
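// The interpolation above finds where the edge crosses r = DEVICE_RADIUS_INSULATOR_OUTER:
// treating radius as roughly linear along the segment, r(lambda) ~ r2 + lambda*(r1 - r2),
// solving r(lambda) = R_ins gives lambda = (R_ins - r2)/(r1 - r2), the factor used for 'point'.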
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// set nT on the edge: try just the average of the two nT, weighted by distance to own centre.
// Recall periodic when we look at distance to own centre.
f64 nTi_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti + p_n_minor[izNeighMinor[i]].n*oppT.Ti);
f64 nTe_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Te + p_n_minor[izNeighMinor[i]].n*oppT.Te);
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(shared_T[threadIdx.x].Te + oppT.Te) * edge_normal;
} else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 nTi_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti;
f64 nTe_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Te;
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += shared_T[threadIdx.x].Te * edge_normal;
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
};
} else {
// Typical tri.
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
};
if (TESTTRI) {
printf("GPU : %d : contribs MAR_ion.y %1.11E MAR_elec.y %1.11E \n"
"n0 %1.10E n1 %1.10E Ti0 %1.10E Ti1 %1.10E edgenormal.y %1.10E\n",
CHOSEN,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.y,
n0, n1, T0.Ti, T1.Ti, edge_normal.y);
}
// Having a real problem with AreaMinor.
// All very well how it is used here but the one we should record, for creating N and hence pressure effects ..
// is the one that comes from rolling points upwards.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x; // Area to save.
// See a way that FP accuracy was eroded: we take a difference of two close things already to get edge_normal.
// can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
n0 = n1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
oppT = nextT;
};
/*if (info.flag == CROSSING_INS) {
// In this case set v_r = 0 and set a_TP_r = 0 and dv/dt _r = 0 in general
//f64_vec2 rhat = info.pos / info.pos.modulus();
MAR_ion -= Make3(
(MAR_ion.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
MAR_elec -= Make3(
(MAR_elec.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
no
// and we looked at insulator values for T so Grad Te was meaningless:
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
// I think we do need to make v_r = 0. It's common sense that it IS 0
// since we site our v_r estimate on the insulator. Since it is sited there,
// it is used for traffic into the insulator by n,nT unless we pick out
// insulator-abutting cells on purpose.
// However, we then should make an energy correction -- at least if
// momentum is coming into this minor cell and being destroyed.
// Doesn't quite work like that. We do not destroy, we just do not store a value for the mom in the domain part of cell.
};*/
/*
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor_for_A;
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor_for_A;
p_GradTe[iMinor] = Our_integral_grad_Te / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor_for_A, BZ_CONSTANT);
p_AreaMinor[iMinor] = AreaMinor;
// wow :
f64_vec2 overall_v_ours = p_v_overall_minor[iMinor];
ROCAzduetoAdvection[iMinor] = overall_v_ours.dot(Our_integral_grad_Az / AreaMinor);
ROCAzdotduetoAdvection[iMinor] = overall_v_ours.dot(Our_integral_grad_Azdot / AreaMinor);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iMinor, &(MAR_ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iMinor, &(MAR_elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevAzdot = shared_Azdot[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevAzdot = shared_Azdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppAzdot = shared_Azdot[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppAzdot = shared_Azdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextAzdot = shared_Azdot[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextAzdot = shared_Azdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0); // looks anticlockwise
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
// if ((i % 2 == 0) || // vertex neigh
// ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
// minus
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
};
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor;
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
p_AreaMinor[iMinor] = AreaMinor;
ROCAzduetoAdvection[iMinor] = 0.0;
ROCAzdotduetoAdvection[iMinor] = 0.0;
} // non-domain tri
}; // was it FRILL
// Okay. While we have n_shards in memory we could proceed to overwrite with vxy.
// But get running first before using union and checking same.
}*/
__global__ void kernelCreate_pressure_gradT_and_gradA_CurlA_minor_noadvect(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_minor,
AAdot * __restrict__ p_AAdot,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just so we can handle insulator
bool * __restrict__ bz_pressureflag,
f64_vec2 * __restrict__ p_GradTe,
f64_vec2 * __restrict__ p_GradAz,
f64_vec3 * __restrict__ p_B
)
{
// Getting this down to 8 vars we could have 512 threads (12 vars/thread total with vertex vars)
__shared__ T2 shared_T[threadsPerTileMinor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor]; // 5 + 13/2 + below, 2.5 -> 14 doubles.
// Tile size is 256 though so 14 doubles will allow 1x to run. We have extra shared space if we need it.
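// i.e. roughly 14 * 8 * 256 = 28,672 bytes of shared per 256-thread block (assuming 48 KB
// available), which is presumably the "1x" being referred to.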
// We could also argue that with shards for n_ion in memory we are better off doing an overwrite and doing stuff for nv also.
// never mind that for now. <-- ?
// 2019: Hang on. Why did I use shards? It's quite a good idea. If we are flat then the pressure lands more on the triangles
// at the interface. Makes a consistent set of values of n to pave the space.
__shared__ T2 shared_T_verts[threadsPerTileMajor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// There is a good argument for splitting out A,Adot to a separate routine.
// That way we could have 10.5 => 585 ie 576 = 288*2 threads.
// Here we got (2+1+1+2)*1.5 = 9 , + 6.5 = 15.5 -> 384 minor threads max.
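// For reference, with 48 KB of shared per block those counts give 48*1024/(10.5*8) ~= 585
// (hence 576 = 288*2 threads) and 48*1024/(15.5*8) ~= 396 (hence the 384-thread cap).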
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
{
AAdot temp = p_AAdot[iMinor];
shared_Az[threadIdx.x] = temp.Az;
}
{
T3 T_ = p_T_minor[iMinor];
shared_T[threadIdx.x].Te = T_.Te;
shared_T[threadIdx.x].Ti = T_.Ti;
}
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
AAdot temp = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
shared_Az_verts[threadIdx.x] = temp.Az;
T3 T_ = p_T_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_T_verts[threadIdx.x].Te = T_.Te;
shared_T_verts[threadIdx.x].Ti = T_.Ti; // MOVED THIS OUT OF the following branch to see it match CPU
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
}
else {
// save several bus trips;
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
//shared_T_verts[threadIdx.x].Te = 0.0;
//shared_T_verts[threadIdx.x].Ti = 0.0;
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
//f64 ourAzdot, oppAzdot, prevAzdot, nextAzdot;
f64_vec2 opppos, prevpos, nextpos, edge_normal;
T2 oppT, prevT, nextT;
//nvals our_n, opp_n, prev_n, next_n;
f64_vec2 Our_integral_curl_Az, Our_integral_grad_Az, Our_integral_grad_Te;
f64 Our_integral_Lap_Az;
if (threadIdx.x < threadsPerTileMajor) {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64_vec3 MAR_ion, MAR_elec;
memcpy(&(MAR_ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(MAR_elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourAz = shared_Az_verts[threadIdx.x];
bool bPressure = bz_pressureflag[iVertex];
// True for DOMAIN_VERTEX, unless it is a crossing_cath vertex, in which case it is false.
if (bPressure) {
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_T[izTri[iprev] - StartMinor];
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_T[izTri[i] - StartMinor];
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
T3 opp_T = p_T_minor[izTri[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
short iend = tri_len;
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_T[izTri[inext] - StartMinor];
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
T3 next_T = p_T_minor[izTri[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
T2 T0, T1;
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// So this is pretty stupid ---
// If shardmodel went for flat then we have decided that there is no pressure gradient affecting v here.
// Mind you we didn't expect it to be flat nearly as often as it is flat.
// Think carefully about what pressure we want to feel.
// It makes a kind of sense if you have a cliff of density then you feel it in the triangle in between.
// ***************************************************************************************
// But that won't push points apart. It just sends stuff through the wall.
// ***************************************************************************************
// Hmm.
// It's a shame we can't just use actual n values to infer gradient over a region.
// It probably creates wobbles in v as well, because if we move fast particles at the edge then we leave
// behind a still-lower v in the vertex-centered minor. <-- yes, this instability is clear in practice.
// The scheme also tends to skew the solution.
// Assume neighs 0,1 are relevant to border with tri 0 minor
// To get integral grad we add the averages along the edges times edge_normals
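// (Discrete form of the identity  integral_A grad f dA  =  contour integral of f * outward-normal dl :
//  for an anticlockwise walk, edge_normal = (y1-y0, x0-x1) is the outward normal scaled by edge length,
//  so summing f_edge * edge_normal and dividing by AreaMinor at the end gives the cell-average gradient.
//  For the pressure terms f = n*T per species, and the result is divided by mass via over_m_i / over_m_e.)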
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
if (TESTPRESSUREY) {
printf("Pressure vertex %d MAR_ion.y %1.9E contrib.y %1.9E n0 %1.9E Ti0 %1.9E n1 %1.9E Ti1 %1.9E edge_normal.y %1.9E \n",
VERTCHOSEN, MAR_ion.y,
-0.5*(n0*T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
n0, T0.Ti, n1, T1.Ti, edge_normal.y);
}
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN)
// printf("GPU %d : GradTe contrib %1.14E %1.14E Te %1.14E opp %1.14E next %1.14E prev %1.14E edge_normal %1.14E %1.14E\n", iVertex + BEGINNING_OF_CENTRAL,
// 0.5*(T0.Te + T1.Te) * edge_normal.x,
//0.5*(T0.Te + T1.Te) * edge_normal.y,
// shared_T_verts[threadIdx.x].Te, oppT.Te, nextT.Te, prevT.Te,
//edge_normal.x, edge_normal.y);
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
// Introduced minus because we otherwise are getting negative of curl.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
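// (Az_edge is the mean of the two endpoint values, each endpoint carrying the average of its three
//  surrounding Az values, hence the (2,2,1,1)/6 weights. The minus sign on the curl follows from the
//  2D identity: the xy part of integral_A curl(Az zhat) dA equals minus the anticlockwise contour
//  integral of Az dl. Summing (mean x on edge)*edge_normal.x is the contour integral of x*n_x dl,
//  which by the divergence theorem applied to (x,0) is exactly the cell area.)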
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevAz = oppAz;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppT = nextT;
}; // next i
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Te / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &MAR_elec, sizeof(f64_vec3));
} else {
Vector2 zero(0.0, 0.0);
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = zero;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = zero;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(zero, BZ_CONSTANT);
// we certainly could still calculate B, though that was not how this was before.
}; // bPressure
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// T2 prevT, nextT, oppT;
//f64 prevAz, nextAz, oppAz, ourAz;
//f64 prevAzdot, nextAzdot, oppAzdot, ourAzdot;
f64_vec3 MAR_ion, MAR_elec;
// this is not a clever way of doing it. We want a more careful approach.
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
if ((izNeighMinor[3] >= StartMinor) && (izNeighMinor[3] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[3] - StartMinor];
} else {
AAdot temp = p_AAdot[izNeighMinor[3]];
oppAz = temp.Az;
};
// p_LapAz[iMinor] = oppAz - ourAz; // OBSOLETE ---- need to delete this from routine.
p_GradAz[iMinor] = Vector2(0.0, 0.0);
memset(&(p_B[iMinor]), 0, sizeof(f64_vec3));
p_GradTe[iMinor] = Vector2(0.0, 0.0);
// p_AreaMinor[iMinor] = 1.0e-12;
memset(&(p_MAR_ion[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_MAR_elec[iMinor]), 0, sizeof(f64_vec3));
} else {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64 AreaMinor_for_A = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(&MAR_ion, p_MAR_ion + iMinor, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iMinor, sizeof(f64_vec3));
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevT = shared_T[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_T_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 prev_T = p_T_minor[izNeighMinor[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppT = shared_T[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_T_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
// indexminor sequence:
// 0 = corner 0
// 1 = neighbour 2
// 2 = corner 1
// 3 = neighbour 0
// 4 = corner 2
// 5 = neighbour 1
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
// Pathological case: OUTERMOST vertex where neigh_len is not correct to take as == tri_len
// !
// ///////////////////////////////////////////////////////////////////////////////////////////
// [0] is on our clockwise side rel to [1]. That means it is anticlockwise for the vertex.
// That means we interpolate with the value from next tri around.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 01A n_array 01 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[0], n_array[1], cornerindex.i1,
shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
// the first two entries
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
// n1 goes with "prev" -- did I do that on purpose?
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 01B n_array 01 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[0], n_array[1], cornerindex.i1,
p_n_shards[cornerindex.i1].n[who_am_I],
p_n_shards[cornerindex.i1].n_cent);
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 23A n_array 23 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[2], n_array[3], cornerindex.i2,
shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 23B n_array 23 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[2], n_array[3], cornerindex.i2,
p_n_shards[cornerindex.i2].n[who_am_I],
p_n_shards[cornerindex.i2].n_cent);
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 45A n_array 45 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[4], n_array[5], cornerindex.i3,
shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 45B n_array 45 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[4], n_array[5], cornerindex.i3,
p_n_shards[cornerindex.i3].n[who_am_I],
p_n_shards[cornerindex.i3].n_cent);
//This matches a diagram:
//
// 2---(4)----(3)---1 = corner 1 = indexminor 2: (2,3)
// \ / \ /
// \/ \ /
// (5\ (2/ indexminor 1 = neighbour 2: (1,2)
// \ /
// \0)--(1/
// \ _/
// 0 = corner 0 = indexminor0
};
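// With the six n_array values laid out as in the diagram above (two interpolated values per corner
// shard model), the edge loop below just reads n0 = n_array[i], n1 = n_array[inext].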
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextT = shared_T[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_T_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
T3 next_T = p_T_minor[izNeighMinor[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor_for_A += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
//f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
////f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
////if ((i % 2 == 0) || ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
//if ( (opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
// (opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
//// Modified here ..
T3 T0, T1; // waste of registers
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
n0 = n_array[i];
n1 = n_array[inext]; // !
// To get integral grad we add the averages along the edges times edge_normals
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// typical edge
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins,
// or we have allowed a below-ins value to affect something anyway.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
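// (Both branches place the lower endpoint at radius DEVICE_RADIUS_INSULATOR_OUTER by interpolating
//  as if radius varied linearly along the segment endpt0-endpt1 -- the "almost linearly" above.
//  Hitting the radius exactly would mean solving a quadratic in the interpolation parameter.)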
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// set nT on the edge: try just the average of the two nT, weighted by distance to own centre.
// Recall periodic when we look at distance to own centre.
f64 nTi_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti + p_n_minor[izNeighMinor[i]].n*oppT.Ti);
f64 nTe_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Te + p_n_minor[izNeighMinor[i]].n*oppT.Te);
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(shared_T[threadIdx.x].Te + oppT.Te) * edge_normal;
} else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
f64 nTi_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti;
f64 nTe_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Te;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += shared_T[threadIdx.x].Te * edge_normal;
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
}; // domain triangle opposite or not
} else {
// Typical tri.
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
};
if (TESTTRI) {
printf("pressure %d : contribs MAR_ion.x %1.11E MAR_elec.x %1.11E \n"
"contribs MAR_ion.y %1.11E MAR_elec.y %1.11E \n"
"n0 %1.10E n1 %1.10E Ti0 %1.10E Ti1 %1.10E edgenormal %1.9E %1.9E\n",
iMinor,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.x,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.x,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.y,
n0, n1, T0.Ti, T1.Ti, edge_normal.x, edge_normal.y);
}
// if (Az_edge != Az_edge) {
// printf("GPU : %d : Az_edge %1.9E ourAz %1.9E oppAz %1.9E \n ourintegralgradTe %1.9E %1.9E contrib %1.9E %1.9E T01 %1.9E %1.9E edgenormal %1.9E %1.9E\n"
// "prevT.Te %1.9E ourT.Te %1.9E oppT.Te %1.9E nextT.Te %1.9E \n",
// iMinor, Az_edge, ourAz, oppAz,
// Our_integral_grad_Te.x, Our_integral_grad_Te.y,
// 0.5*(T0.Te + T1.Te) * edge_normal.x, 0.5*(T0.Te + T1.Te) * edge_normal.y,
// T0.Te, T1.Te, edge_normal.x, edge_normal.y,
// prevT.Te, shared_T[threadIdx.x].Te,oppT.Te,nextT.Te
// );
// }
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//
// if ((TESTTRI))
// printf("GPU AreaMinor %d : %1.14E from += %1.14E : endpt0.x %1.14E endpt1.x %1.14E edge_normal.x %1.14E\n"
// "endpt1.y endpt0.y %1.14E %1.14E \n",
// iMinor, AreaMinor, (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x,
// endpt0.x, endpt1.x, edge_normal.x,
// endpt1.y, endpt0.y);
// See a way that FP accuracy was eroded: we take a difference of two close things already to get edge_normal.
// can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
n0 = n1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
// prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
// oppAzdot = nextAzdot;
oppT = nextT;
};
// No setting a_r = 0
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor_for_A;
p_GradTe[iMinor] = Our_integral_grad_Te / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor_for_A, BZ_CONSTANT);
// p_AreaMinor[iMinor] = AreaMinor;
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iMinor, &(MAR_ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iMinor, &(MAR_elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
// We do not need B or Grad A outside of the domain. !
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
// integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
// f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
// //f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
//// if ((i % 2 == 0) || // vertex neigh
//// ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
// if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
// (opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevAz = oppAz;
opppos = nextpos;
oppAz = nextAz;
};
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor;
// p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
// p_AreaMinor[iMinor] = AreaMinor;
} // non-domain tri
}; // was it FRILL
// Okay. While we have n_shards in memory we could proceed to overwrite with vxy.
// But get running first before using union and checking same.
}
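// ---------------------------------------------------------------------------------------------
// Illustrative sketch only -- not called by the kernels in this file. It isolates the per-edge
// accumulation pattern that the routine above and kernelCreate_momflux_minor below both walk
// through inline, so the geometry can be checked in one place. The function name is hypothetical;
// the only assumptions are that f64 is double and that f64_vec2 exposes .x and .y.
//
// Per edge (endpt0 -> endpt1, anticlockwise around the minor cell):
//   integral_grad_f  += f_edge * edge_normal            ( contour form of integral of grad f )
//   integral_curl_Az -= Az_edge * (endpt1 - endpt0)     ( xy part of integral of curl(Az zhat) )
//   AreaMinor        += mean x on edge * edge_normal.x  ( divergence theorem applied to (x,0) )
__device__ __forceinline__ void AccumulateEdgeContribs_sketch(
	f64 f_edge, f64 Az_edge,
	f64_vec2 const & endpt0, f64_vec2 const & endpt1,
	f64_vec2 & integral_grad_f,
	f64_vec2 & integral_curl_Az,
	f64 & AreaMinor)
{
	// Outward normal scaled by edge length, for an anticlockwise walk:
	f64_vec2 edge_normal;
	edge_normal.x = endpt1.y - endpt0.y;
	edge_normal.y = endpt0.x - endpt1.x;

	// Gradient: sum f_edge * n_hat dl; divide by the accumulated area afterwards.
	integral_grad_f.x += f_edge * edge_normal.x;
	integral_grad_f.y += f_edge * edge_normal.y;

	// Curl of Az zhat (xy components): minus the contour integral of Az dl.
	integral_curl_Az.x -= Az_edge * (endpt1.x - endpt0.x);
	integral_curl_Az.y -= Az_edge * (endpt1.y - endpt0.y);

	// Cell area via the contour integral of x * n_x dl.
	AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
}
// ---------------------------------------------------------------------------------------------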
__global__ void kernelCreate_momflux_minor(
structural * __restrict__ p_info_minor,
v4 * __restrict__ p_vie_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor,
NTrates * __restrict__ NT_addition_tri // inevitable
)
{
__shared__ v4 shared_vie[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vie[threadIdx.x] = p_vie_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4)); // this was always a bug as long as we had traffic near outermost!
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2)); // it actually is zero at outermost
if (info.flag == OUTERMOST)
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
};
};
__syncthreads();
v4 our_v, opp_v, prev_v, next_v;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
if (threadIdx.x < threadsPerTileMajor) {
three_vec3 ownrates;
memcpy(&(ownrates.ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(ownrates.elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_vie_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64 vez0, viz0, vez1, viz1;
f64_vec2 vxy0, vxy1, endpt1, edge_normal;
short iend = tri_len;
// We deal with DOMAIN_VERTEX only!!
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
next_v_overall = Anticlockwise_d*next_v_overall;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// Assume neighs 0,1 are relevant to border with tri 0 minor.
// *********
// Verify that tri 0 is formed from our vertex, neigh 0 and neigh 1; - tick I think
// *********
vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
vxy1 = THIRD * (our_v.vxy + opp_v.vxy + next_v.vxy);
vez0 = THIRD * (our_v.vez + opp_v.vez + prev_v.vez);// not used?
viz0 = THIRD * (our_v.viz + opp_v.viz + prev_v.viz);// not used?
vez1 = THIRD * (our_v.vez + opp_v.vez + next_v.vez);// not used?
viz1 = THIRD * (our_v.viz + opp_v.viz + next_v.viz); // not used?
f64 relvnormal = 0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// In reasonable conditions I suppose that is something sensible.
// However if we use n v_edge relvnormal then from a fast upwind cell we are always ejecting the slowest material!
// That is unstable.
// We could profitably create a minmod model of velocity.
// However for now let's try pretending there is a shock front (so use average v for advection) and the upwind nv
// to advect is just the upwind cell average.
// FIX FOR NOW, 22/11/20 :
// We do not allow traffic from insulator-crossing triangles to/from vertex minors.
// This is because we can't have an intermediate cell of momentum within a density cell that has only one end.
// ===========================================================================================================
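// In symbols: each edge contributes d/dt(N v) -= relvnormal * n_edge * v_upwind, where
// n_edge = 0.5*(n0+n1), v_upwind is our own v when relvnormal > 0 (outflow) and the opposing
// cell's v when relvnormal < 0 (inflow), and relvnormal is the mean (fluid minus mesh) velocity
// on the edge dotted with the outward normal scaled by edge length.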
int neighflag = p_info_minor[izTri[i]].flag;
if (neighflag == DOMAIN_TRIANGLE) {
if (relvnormal > 0.0) {
// losing stuff
ownrates.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
if (TESTADVECTZ) {
printf("GPUadvect %d izTri[%d] %d USING our vez %1.9E [ oppvez %1.9E ] relvnormaldot %1.9E \n"
"gaining mom %1.9E | n0 %1.9E n1 %1.9E n %1.9E ncent %1.9E edge_normal %1.8E %1.8E vuse %1.8E %1.8E\n",
iVertex, i, izTri[i], our_v.vez, opp_v.vez, relvnormal,
-0.5*relvnormal*(n0 + n1)*our_v.vez, n0, n1, 0.5*(n0 + n1), shared_n_shards[threadIdx.x].n_cent,
edge_normal.x, edge_normal.y,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).x,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).y);
};
}
else {
ownrates.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
if (TESTADVECTZ) {
printf("GPUadvect %d izTri[%d] %d our vez %1.9E [USING oppvez %1.9E ] relvnormaldot %1.9E \n"
"gaining mom %1.9E | n0 %1.9E n1 %1.9E n %1.9E ncent %1.9E edge_normal %1.8E %1.8E vuse %1.8E %1.8E\n",
VERTCHOSEN, i, izTri[i], our_v.vez, opp_v.vez, relvnormal,
-0.5*relvnormal*(n0 + n1)*opp_v.vez, n0, n1, 0.5*(n0 + n1), shared_n_shards[threadIdx.x].n_cent,
edge_normal.x, edge_normal.y,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).x,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).y);
};
//We are using upwind v ... however, n0 came from ourselves because we look out of our own minor into a triangle.
// Why it's minus? : relvnormal was less than zero but we gain a positive amt of opp_v.
};
};
// vie.vez = (vie_k.vez*Nk + h_use * MAR.z) / Nplus;
// OLD, unstable :
//ownrates.ion -= 0.5*relvnormal*(n0 *(Make3(vxy0 - our_v.vxy, viz0 - our_v.viz) + n1*(Make3(vxy1 - our_v.vxy, viz1 - our_v.viz))));
if (TESTADVECT) {
printf("GPUadvect %d izTri[%d] %d ownrates.ion.y %1.9E contrib.y %1.9E %1.9E [ours,>0 out,<0] \n"
"relvnormal %1.10E n0 %1.9E n1 %1.9E vxy0.y %1.8E vxy1.y %1.8E\n"
"edge_normal %1.8E %1.8E our_v.y %1.8E opp_v.y %1.8E prev_v.y %1.8E next_v.y %1.8E\n",
VERTCHOSEN, i, izTri[i], ownrates.ion.y,
-0.5*relvnormal*(n0 + n1)*our_v.vxy.y, -0.5*relvnormal*(n0 + n1)*opp_v.vxy.y,
relvnormal, n0, n1, vxy0.y, vxy1.y,
edge_normal.x, edge_normal.y,
our_v.vxy.y, opp_v.vxy.y, prev_v.vxy.y, next_v.vxy.y);
};
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
// Somehow we've created an unstable situation. We are chucking out high nv at the top: higher n and lower v than in our triangle.
// Should we insist on upwind v as what is carried?
//
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
}; // next i
// AreaMinor is not saved, or even calculated for tris.
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &(ownrates.ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &(ownrates.elec), sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
our_v = shared_vie[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Why the apparently stupid choice to make another variable? :
three_vec3 ownrates_minor;
memcpy(&(ownrates_minor.ion), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memcpy(&(ownrates_minor.elec), &(p_MAR_elec[iMinor]), sizeof(f64_vec3));
f64 vez0, viz0, viz1, vez1;
f64_vec2 vxy0, vxy1;
if (TESTTRI) printf("iMinor %d info.flag %d \nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n",
iMinor, info.flag);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
if (TESTTRI) printf("iMinor %d info.flag %d \nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n",
iMinor, info.flag);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
} else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ? tick
// Assume neighs 0,1 are relevant to border with tri 0 minor.
vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
vxy1 = THIRD * (our_v.vxy + opp_v.vxy + next_v.vxy);
vez0 = THIRD * (our_v.vez + opp_v.vez + prev_v.vez);
viz0 = THIRD * (our_v.viz + opp_v.viz + prev_v.viz);
vez1 = THIRD * (our_v.vez + opp_v.vez + next_v.vez);
viz1 = THIRD * (our_v.viz + opp_v.viz + next_v.viz); // Not used for anything, apparently.
f64 relvnormal = 0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// Note that average instead of upwind, is of course unstable.
// FIX FOR NOW, 22/11/20 :
// We do not allow traffic from insulator-crossing triangles to/from vertex minors.
// This is because we can't have an intermediate cell of momentum within a density cell that has only one end.
// ===========================================================================================================
if (izNeighMinor[i] < BEGINNING_OF_CENTRAL) // triangle
{
if (relvnormal > 0.0) {
// losing stuff n
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
}
else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
// Why it's minus?
// relvnormal was less than zero but we gain a positive amt of opp_v.
};
};
} else {
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 v_overall0, v_overall1;
v_overall0 = THIRD * (our_v_overall + prev_v_overall + opp_v_overall);
v_overall1 = THIRD * (our_v_overall + next_v_overall + opp_v_overall);
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
// endpt1 is defined in this way, so its motion must be defined accordingly.
// The v_overall of the below-insulator point is actually 0.
f64 r3 = nextpos.modulus();
v_overall1 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r1 - r3))*v_overall0;
// but has no radial component:
v_overall1 -= (v_overall1.dot(endpt1)) / (endpt1.dot(endpt1))*endpt1;
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
f64 r3 = prevpos.modulus();
v_overall0 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r2 - r3))*v_overall1;
// but has no radial component:
v_overall0 -= (v_overall0.dot(endpt0)) / (endpt0.dot(endpt0))*endpt0;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// have not yet handled how to do momflux between two CROSSING_INS tris.
// the above vxy1 etc will be invalid because of taking data from insulator points.
// Does that mean we will get weird effects? Probably. Have to think here then.
// Reset relvnormal:
if (prev_v.vez == 0.0) vxy0 = 0.5*(our_v.vxy + opp_v.vxy);
if (next_v.vez == 0.0) vxy1 = 0.5*(our_v.vxy + opp_v.vxy);
//vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
if (n0 == 0.0) // generated from shardmodel from inside the insulator, then it should come out 0.
n0 = 0.5*(p_n_minor[iMinor].n + p_n_minor[izNeighMinor[i]].n);
if (n1 == 0.0)
n1 = 0.5*(p_n_minor[iMinor].n + p_n_minor[izNeighMinor[i]].n);
relvnormal = 0.5*(vxy0 + vxy1 - v_overall0 - v_overall1).dot(edge_normal);
if (relvnormal > 0.0) {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
} else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
};
} else {
// Looking down into insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
int debugprevflag = 0, debugnextflag = 0;
f64_vec2 endpt0store, endpt1store;
endpt0store = endpt0;
endpt1store = endpt1;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
debugprevflag = 1;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards the vertex above. But now it is two neighbours ahead (inextnext).
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
debugnextflag = 1;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
};
// will be a 0 contribution if endpt1 = endpt0, that's ok.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// should be facing towards (0,0).
// Insulator arc isn't moving, no v_overall.
relvnormal = our_v.vxy.dot(edge_normal);
if (relvnormal > 0.0) {
f64 n_edge = p_n_minor[iMinor].n;
// Only the vr component is reversed!!!
// f64 vr = -our_v.vxy.dot(edge_normal) / edge_normal.modulus();
// rhat = -edge_normal/edge_normal.modulus();
// v-= vr rhat
f64_vec2 vr_rhat = edge_normal*((our_v.vxy.dot(edge_normal)) /
(edge_normal.dot(edge_normal)));
// positive amt * negative r vector = negative amt * positive r vector.
f64 vr_squared = our_v.vxy.dot(edge_normal)*our_v.vxy.dot(edge_normal) /
edge_normal.dot(edge_normal);
ownrates_minor.ion -= 2.0*relvnormal*n_edge*Make3(vr_rhat,0.0);
ownrates_minor.elec -= 2.0*relvnormal*n_edge*Make3(vr_rhat, 0.0);
// Now add heat:
NTrates dNT = NT_addition_tri[iMinor];
// change in 0.5 Nmvv = 0.5mv d/dt(Nv) = m*vr*vr*n_edge*relvnormal since v dot vr rhat = vr^2
// change in 1.5 NT should cancel this.
dNT.NiTi += 0.6666666666667*m_i*vr_squared*n_edge*relvnormal;
dNT.NeTe += 0.6666666666667*m_e*vr_squared*n_edge*relvnormal;
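// Check: 1.5 * d(NT)/dt = 1.5 * (2/3) * m * vr^2 * n_edge * relvnormal = m * vr^2 * n_edge * relvnormal,
// which matches the kinetic energy removed by the momentum sink above, so total energy is conserved at the wall.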
// printf("iMinor %d dNiTi %1.9E cont %1.9E vr_squared %1.9E n %1.8E relvn %1.8E our_v %1.8E %1.8E \n"
// "debugflags %d %d endpt0 %1.8E %1.8E endpt1 %1.8E %1.8E previously %1.8E %1.8E, %1.8E %1.8E edgenormal %1.8E %1.8E \n",
// iMinor, dNT.NiTi, 0.6666666666667*vr_squared*n_edge*relvnormal,
// vr_squared, n_edge, relvnormal, our_v.vxy.x, our_v.vxy.y,
// debugprevflag, debugnextflag, endpt0.x, endpt0.y, endpt1.x, endpt1.y,
// endpt0store.x, endpt0store.y, endpt1store.x, endpt1store.y, edge_normal.x, edge_normal.y);
NT_addition_tri[iMinor] = dNT;
};
// If we are pulling away from the ins, do nothing!
};
};
} else {
// Typical edge.
if (relvnormal > 0.0) {
// losing stuff
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
}
else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
// Why minus? relvnormal was less than zero, so we gain a positive amount of opp_v.
};
};
if (((TESTTRI)))
printf("advectiveGPU %d i %d iznm %d info.flag %d neigh.flag %d contrib %1.10E edge_nml %1.8E %1.8E\n"
"relvnormal %1.10E v_use %1.9E %1.9E n0 %1.12E n1 %1.12E our_vez %1.10E opp_vez %1.10E\n"
"~~~&~&~&~&~&~&~&~&~&~&~&~&~&~&~&~&&~&~&~&~&~&~&~&~&~&~~~\n",
CHOSEN,i, izNeighMinor[i],
info.flag, p_info_minor[izNeighMinor[i]].flag,
-0.5*relvnormal*(n0 + n1)*((relvnormal>0.0)?our_v.vez:opp_v.vez),
edge_normal.x, edge_normal.y,
relvnormal,
(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).x,
(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).y,
n0, n1, our_v.vez, opp_v.vez);
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
memcpy(&(p_MAR_ion[iMinor]), &(ownrates_minor.ion), sizeof(f64_vec3));
memcpy(&(p_MAR_elec[iMinor]), &(ownrates_minor.elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}
// Not optimized: !!
#define FACTOR_HALL (1.0/0.96)
#define FACTOR_PERP (1.2/0.96)
//#define DEBUGNANS
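// The velocity gradients computed inside the kernel below are Green's-theorem averages over the
// quadrilateral (ourpos, prevpos, opppos, nextpos): grad_x v = (1/A) * contour integral of v dy and
// grad_y v = -(1/A) * contour integral of v dx, taking v as the mean of the two endpoint values on
// each side; the orientation convention cancels between the signed area and the contour sums.
// A minimal sketch of that computation, factored out as an (uncalled) helper purely for reference --
// it assumes only the f64 / f64_vec2 types already used throughout this file:
__device__ inline f64_vec2 Grad_over_quadrilateral_sketch(
	f64 v_ours, f64 v_prev, f64 v_opp, f64 v_next,
	f64_vec2 ourpos, f64_vec2 prevpos, f64_vec2 opppos, f64_vec2 nextpos)
{
	// Signed (shoelace) area of the quadrilateral, same pairing of corners as in the kernels:
	f64 area = 0.5*(
		  (ourpos.x + nextpos.x)*(ourpos.y - nextpos.y)
		+ (prevpos.x + ourpos.x)*(prevpos.y - ourpos.y)
		+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
		+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
	f64_vec2 grad;
	// Each side contributes (average of v on that side) * (difference in y) to grad.x ...
	grad.x = 0.5*(
		  (v_ours + v_next)*(ourpos.y - nextpos.y)
		+ (v_prev + v_ours)*(prevpos.y - ourpos.y)
		+ (v_opp + v_prev)*(opppos.y - prevpos.y)
		+ (v_next + v_opp)*(nextpos.y - opppos.y)) / area;
	// ... and minus (average of v) * (difference in x) to grad.y:
	grad.y = -0.5*(
		  (v_ours + v_next)*(ourpos.x - nextpos.x)
		+ (v_prev + v_ours)*(prevpos.x - ourpos.x)
		+ (v_opp + v_prev)*(opppos.x - prevpos.x)
		+ (v_next + v_opp)*(nextpos.x - opppos.x)) / area;
	return grad;
}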
__global__ void kernelCalculate_deps_WRT_beta_Visc(
f64 const hsub,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_elec_minor, // nT / nu ready to look up
f64_vec3 * __restrict__ p_B_minor,
nvals * __restrict__ p_n_minor, // got this
f64 * __restrict__ p_AreaMinor, // got this -> N, Nn
f64_vec3 * __restrict__ p_Jacobi_ion,
f64_vec3 * __restrict__ p_Jacobi_elec,
f64_vec3 * __restrict__ p_d_eps_by_d_beta_i_,
f64_vec3 * __restrict__ p_d_eps_by_d_beta_e_
)
{
// We only need 3 in shared now, can re-do when we do elec
__shared__ f64_vec3 shared_vJ[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_vJ_verts[threadsPerTileMajor]; // load & reload in Jacobi regressor v instead of v
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// Putting some stuff in shared may speed up if there are spills. !!
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64_vec3 our_v, opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 d_eps_by_d_beta;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vJ[threadIdx.x] = p_Jacobi_ion[iMinor]; // is memcpy faster or slower than operator= ?
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_vJ_verts[threadIdx.x]), &(p_Jacobi_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there: that is fairer than one-way traffic and avoids handling OUTERMOST specially.
// Alternatively I could handle it, doing flows only if the flag does not come up OUTER_FRILL.
// OK, just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_vJ_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
// IONS FIRST:
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
our_v = shared_vJ_verts[threadIdx.x]; // optimization: use replace or #define to get rid of storing this again.
d_eps_by_d_beta = our_v; // eps = v_k+1 - v_k - h/N MAR
// Rate of change of eps_x = Jacobi_x
f64 Factor = hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_ion);
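// Spelling out the derivative (a sketch, using the eps defined in the comment above):
//   eps = v_k+1 - v_k - (hsub/(N m_i)) * MAR(v_k+1)
//   => d eps / d beta = Jacobi - (hsub/(N m_i)) * dMAR/dv [Jacobi],
// so we start from the regressor (d_eps_by_d_beta = our_v) and add Factor times the viscous
// flux of the regressor across each edge in the loop below.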
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vJ[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_Jacobi_ion[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vJ[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_Jacobi_ion[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
// Now sort out anticlock vars:
{
f64_vec2 opp_B;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
f64 ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
f64 nu_theirs = p_nu_ion_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qoverMc
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vJ[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_Jacobi_ion[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64_vec2 gradvx, gradvy, gradviz;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
//
// if (TEST) printf(
// "iVertex %d our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E\n"
// "area_quad %1.8E \n"
// "info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
// iVertex, our_v.vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
// area_quadrilateral,
// info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
//
gradviz.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
//visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y); // - h/N visc_contrib I think
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
else {
f64 omegamod;
f64_vec3 unit_b, unit_perp, unit_Hall;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
// ita_perp = FACTOR_PERP * ita_par * nu*nu / (omegasq + nu*nu);
// ita_cross = FACTOR_HALL * ita_par * nu*omegamod / (omegasq + nu*nu);
}
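// Coordinate frame used below: unit_b is along omega_ci (the B direction), unit_perp is the
// edge normal with its b-component projected out (then normalised), and unit_Hall = unit_b x unit_perp
// completes the right-handed triad. Velocity gradients are then re-expressed in (b, perp, Hall) components.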
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
// but we can make do with 3x partials
// 2. Now get partials in magnetic coordinates
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
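// Summary of the magnetised coefficients just applied:
//   ita_1 = ita_par * nu^2/(nu^2 + omega^2),      ita_2 = ita_par * nu^2/(nu^2 + omega^2/4),
//   ita_3 = ita_par * nu*omega/(nu^2 + omega^2),  ita_4 = (ita_par/2) * nu*omega/(nu^2 + omega^2/4).
// The coefficients acting on W_PP, W_HH, W_PH use the full omegamod; those acting on W_bP, W_bH use
// omegamod/2; ita_3 and ita_4 are the corresponding odd (cross / gyroviscous) parts.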
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
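// momflux_{b,P,H} is minus the viscous stress contracted with the edge vector expressed in magnetic
// coordinates; mapping back to Cartesian, the flux of x-momentum through this edge is
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall, and likewise with
// .y and .z for the y and z momentum components -- which is what the lines below accumulate.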
// ownrates will be divided by N to give dv/dt
// visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
// ownrates_visc += visc_contrib;
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
// We should have created device function for the visc calc since it is repeated now at least 8 times.
// Note that momflux here already had -, visc_contrib did not contain -over_m_i as for unmag.
}
}; // ita_par > 0.0
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
}; // next i
memcpy(p_d_eps_by_d_beta_i_ + iVertex + BEGINNING_OF_CENTRAL, &d_eps_by_d_beta, sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// Ion , triangle:
info = p_info_minor[iMinor];
our_v = shared_vJ[threadIdx.x];
d_eps_by_d_beta = our_v;
//if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
{
long izNeighMinor[6];
char szPBC[6];
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) && (shared_ita_par[threadIdx.x] > 0.0)) {
f64 Factor = hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_ion);
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vJ[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vJ_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_Jacobi_ion[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vJ[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vJ_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_Jacobi_ion[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vJ[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vJ_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_Jacobi_ion[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
// nu = 1.0e10; // DEBUG
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
} else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
} else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // deliberately take the nu corresponding to the chosen ita_par
} else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64_vec2 gradvx, gradvy, gradviz;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y); // accumulate (+=, not =), matching the other branches
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
// Screen out looking out into insulator:
// Not really needed since we did bUsableSide, but let's leave it in for now just to be delicate.
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
} else {
// DO NOTHING -- no additions
}
} else {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
};
}
}; // bUsableSide
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
};
memcpy(&(p_d_eps_by_d_beta_i_[iMinor]), &(d_eps_by_d_beta), sizeof(f64_vec3));
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
// OVERWRITE REGRESSOR
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
shared_vJ[threadIdx.x] = p_Jacobi_elec[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_vJ_verts[threadIdx.x] = p_Jacobi_elec[iVertex + BEGINNING_OF_CENTRAL];
} else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
memset(&(shared_vJ_verts[threadIdx.x]), 0, sizeof(f64_vec3));
};
};
__syncthreads();
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len; // ?!
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_vJ_verts[threadIdx.x]; // optimization: use replace or #define to get rid of storing this again.
d_eps_by_d_beta = our_v;
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
f64 Factor = hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_e);
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vJ[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prev_v = p_Jacobi_elec[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vJ[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opp_v = p_Jacobi_elec[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qovermc (electron)
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vJ[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_Jacobi_elec[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64_vec2 gradvx, gradvy, gradvez;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
// Let's suppose, Pi_yx means the rate of flow of y-momentum in the x direction.
// Thus when we want to know how much y momentum is flowing through the wall we take
// Pi_yx.edge_x + Pi_yy.edge_y -- reasonable.
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y; // b component
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y; // P component
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y; // H component
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
};
};
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
}; // next i
memcpy(p_d_eps_by_d_beta_e_ + iVertex + BEGINNING_OF_CENTRAL, &d_eps_by_d_beta, sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) && (shared_ita_par[threadIdx.x] > 0.0)){
our_v = shared_vJ[threadIdx.x];
d_eps_by_d_beta = our_v;
f64 Factor = hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_e);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vJ[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vJ_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_Jacobi_elec[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vJ[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vJ_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_Jacobi_elec[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vJ[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vJ_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_Jacobi_elec[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qovermc (electron)
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64_vec2 gradvez, gradvx, gradvy;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.1*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x += Factor *(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor *(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor *(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
} else {
// DO NOTHING
}
} else {
d_eps_by_d_beta.x += Factor *(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor *(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor *(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
} else {
// DO NOTHING
}
} else {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
}
}
}; // bUsableSide
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
};
memcpy(&(p_d_eps_by_d_beta_e_[iMinor]), &(d_eps_by_d_beta), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
__global__ void
// __launch_bounds__(128) -- the manual says the launch will fail if the actual block size exceeds the declared maximum, so it is left off here. Too bad huh.
kernelCreate_viscous_contrib_to_MAR_and_NT(
structural * __restrict__ p_info_minor,
v4 * __restrict__ p_vie_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // ion collision frequency, ready to look up
f64 * __restrict__ p_nu_elec_minor, // electron collision frequency, ready to look up
f64_vec3 * __restrict__ p_B_minor,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ v4 shared_vie[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// 4+2+2+1+1 = 10 doubles per minor thread; with the per-vertex copies at half density that is 10*1.5 = 15 per thread. That is possibly as slow as having 24 per thread.
// Thus putting some stuff in shared may speed up if there are spills.
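// Rough budget, as a sketch only (assuming threadsPerTileMinor = 256 and threadsPerTileMajor = 128,
// which is not confirmed here): 15 doubles * 8 bytes = 120 bytes per minor thread,
// i.e. roughly 256 * 120 = 30720 bytes of shared memory per block.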
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: in each loop we want omega and nu to go out of scope as soon as we have used them, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vie[threadIdx.x] = p_vie_minor[iMinor];
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there, because that is fairer than one-way traffic and I don't want to handle OUTERMOST.
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
// IONS FIRST:
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0) )
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
// DROP THIS ONE.
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
// short iend = tri_len;
//f64_vec2 projendpt0;
//if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// iend = tri_len - 2;
// if (info.flag == OUTERMOST) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
// }
// edge_normal.x = endpt0.y - projendpt0.y;
// edge_normal.y = projendpt0.x - endpt0.x;
// AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
//};
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
f64_vec2 gradvx, gradvy, gradviz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
// Order of calculations may help things to go out/into scope at the right times so careful with that.
//f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
f64_vec3 omega_ci;
{
f64_vec2 opp_B;
f64 ita_theirs, nu_theirs;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
ita_theirs = shared_ita_par[izTri[i] - StartMinor];
nu_theirs = shared_nu[izTri[i] - StartMinor];
} else {
opp_B = p_B_minor[izTri[i]].xypart();
ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
nu_theirs = p_nu_ion_minor[izTri[i]];
};
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
} else {
ita_par = ita_theirs;
nu = nu_theirs;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qoverMc
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
if (ita_par > 0.0)
{
v4 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
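// The gradients below are Green's-theorem averages over the quadrilateral (prev, ours, next, opp):
// grad f ~ (1/A) * (contour integral of f * n dl), with f varying linearly along each side,
// which gives the 0.5*(f_a + f_b)*(y_a - y_b)/A pattern for d f/dx and the matching
// -0.5*(f_a + f_b)*(x_a - x_b)/A pattern for d f/dy.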
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
gradvx.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(shared_vie_verts[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (prev_v.viz + shared_vie_verts[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(shared_vie_verts[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (prev_v.viz + shared_vie_verts[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTIONVERTVISC) printf(
"iVertex %d area_quad %1.8E \n"
"our_v.x next prev opp %1.8E %1.8E %1.8E %1.8E gradvx %1.8E %1.8E\n"
"our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E gradvy %1.8E %1.8E\n"
"our_v.z next prev opp %1.8E %1.8E %1.8E %1.8E gradvz %1.8E %1.8E\n"
"info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
iVertex, area_quadrilateral,
shared_vie_verts[threadIdx.x].vxy.x, next_v.vxy.x, prev_v.vxy.x, opp_v.vxy.x,
gradvx.x, gradvx.y,
shared_vie_verts[threadIdx.x].vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
gradvy.x, gradvy.y,
shared_vie_verts[threadIdx.x].viz, next_v.viz, prev_v.viz, opp_v.viz,
gradviz.x, gradviz.y,
info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
htg_diff.x = shared_vie_verts[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie_verts[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie_verts[threadIdx.x].viz - opp_v.viz;
}
if (ita_par > 0.0) {
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case (threshold: omega_ci^2 < 0.001*nu^2, i.e. |omega_ci| below about 0.03*nu)
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_i*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_i*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE)
// {
// ownrates_visc += visc_contrib;
// visc_htg += -THIRD*m_ion*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
// // do not look into frill
// }
// else {
// visc_contrib.x = 0.0; visc_contrib.y = 0.0; visc_contrib.z = 0.0;
// }
//} else
{
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
}
//
// if (TEST)
// printf("iVertex %d tri %d ION ita_par %1.9E \n"
// "gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvz %1.8E %1.8E\n"
// "edgenormal %1.8E %1.8E opp_viz %1.10E our_viz %1.10E\n"
// "ourpos %1.8E %1.8E opp pos %1.8E %1.8E\n"
// "Pi_xx %1.8E xy %1.8E yy %1.8E zx %1.8E\n"
// "visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
// "===\n",
// iVertex, izTri[i], ita_par, gradvx.x, gradvx.y, gradvy.x, gradvy.y,
// gradviz.x, gradviz.y,
// edge_normal.x, edge_normal.y, opp_v.viz, our_v.viz,
// info.pos.x,info.pos.y, opppos.x,opppos.y,
// Pi_xx, Pi_xy, Pi_yy, Pi_zx,
// visc_contrib.x, visc_contrib.y, visc_contrib.z, visc_htg
// );
////
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
// but we can make do with 3x partials
// 2. Now get partials in magnetic coordinates
f64 omegamod;
{
//f64_vec2 edge_normal;
//edge_normal.x = THIRD * (nextpos.y - prevpos.y);
//edge_normal.y = THIRD * (prevpos.x - nextpos.x);
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// We picked edge_normal to be unit_perp.
// Is that at all valid?
// It seems like an arbitrary choice. Since B is in the plane, it's saying we picked perp in the plane, H = z.
// store omegamod instead.
// ita_perp = FACTOR_PERP * ita_par * nu*nu / (omegasq + nu*nu);
// ita_cross = FACTOR_HALL * ita_par * nu*omegamod / (omegasq + nu*nu);
}
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
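// The three sub-blocks below accumulate the traceless rate-of-strain tensor in the
// orthonormal (b, perp, Hall) frame, with d/dz taken as zero since only xy gradients exist:
// W_ab = dv_a/dx_b + dv_b/dx_a - (2/3) delta_ab div v,
// e.g. W_bb = (4/3) dv_b/db - (2/3) dv_P/dP - (2/3) dv_H/dH and W_bP = dv_P/db + dv_b/dP.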
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
if (TESTIONVERTVISC)
printf("dvperp_by_db %1.8E W_bP %1.8E \n",
dvperp_by_db, W_bP);
if (TESTIONVERTVISC)
printf("dvHall_by_db %1.8E W_bH %1.8E \n",
dvHall_by_db, W_bH);
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
if (TESTIONVERTVISC)
printf("dvb_by_dperp %1.8E W_bP %1.8E \n",
dvb_by_dperp, W_bP);
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
if (TESTIONVERTVISC)
printf("dvb_by_dHall %1.8E W_bH %1.8E \n",
dvb_by_dHall, W_bH);
}
}
{
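// The Pi components are built from W using four field-dependent viscosities, as written below:
//   ita_1 = ita_par * nu^2/(nu^2 + omega^2)        ita_2 = ita_par * nu^2/(nu^2 + omega^2/4)
//   ita_3 = ita_par * nu*omega/(nu^2 + omega^2)    ita_4 = 0.5*ita_par * nu*omega/(nu^2 + omega^2/4)
// In the unmagnetised limit ita_1, ita_2 -> ita_par and ita_3, ita_4 -> 0, recovering Pi = -ita_par * W.
// For omega >> nu they follow the familiar magnetised (Braginskii-style) scalings, with ita_par = nT/nu
// so that ita_1 ~ nT*nu/omega^2 and ita_3 ~ nT/omega; the exact prefactors here are the code's own.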
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
if (TESTIONVERTVISC)
printf(" -ita_2 %1.8E W_bP %1.8E contrib %1.8E Pi_P_b %1.8E \n",
-ita_2, W_bP, -ita_2*W_bP, Pi_P_b);
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
if (TESTIONVERTVISC)
printf(" -ita_4 %1.8E W_bH %1.8E contrib %1.8E Pi_P_b %1.8E nu %1.8E omega %1.8E \n",
-ita_4, W_bH, -ita_4*W_bH, Pi_P_b, nu, omegamod);
Pi_H_b += ita_4*W_bP;
}
}
} // scope W
// All we want left over at this point is Pi .. and unit_b
f64 momflux_b, momflux_perp, momflux_Hall;
{
// Most efficient way: compute mom flux in magnetic coords
f64_vec3 mag_edge;
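// mag_edge is edge_normal re-expressed in the orthonormal (b, perp, Hall) basis
// (edge_normal has no z-component, so only the xy parts of the unit vectors enter).
// Then momflux_a = -(Pi . mag_edge)_a, using the symmetry Pi_P_b = Pi_b_P etc.,
// and visc_contrib below rotates the result back to Cartesian x, y, z.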
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_i*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
// visc_htg += -TWOTHIRDS*m_ion*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z); // Claim all visc htg for this vertcell
// }
//} else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z); // Claim all visc htg for this vertcell
}
if (TESTIONVERTVISC) {
printf("iVertex %d tri %d ION ita_par %1.9E omega %1.9E %1.9E %1.9E nu %1.9E ourpos %1.8E %1.8E \n"
"unit_b %1.8E %1.8E %1.8E unit_perp %1.8E %1.8E %1.8E unit_Hall %1.8E %1.8E %1.8E\n"
"Pi_b_b %1.8E Pi_P_b %1.8E Pi_P_P %1.8E Pi_H_b %1.8E Pi_H_P %1.8E Pi_H_H %1.8E\n"
"momflux b %1.8E perp %1.8E cross %1.8E visc_contrib %1.9E %1.9E %1.9E \n",
iVertex, izTri[i], ita_par, omega_ci.x, omega_ci.y, omega_ci.z, nu,
info.pos.x, info.pos.y,
unit_b.x, unit_b.y, unit_b.z, unit_perp.x, unit_perp.y, unit_perp.z, unit_Hall.x, unit_Hall.y, unit_Hall.z,
Pi_b_b, Pi_P_b, Pi_P_P, Pi_H_b, Pi_H_P, Pi_H_H,
momflux_b, momflux_perp, momflux_Hall,
visc_contrib.x, visc_contrib.y, visc_contrib.z
);
printf(
"htgdiff %1.10E %1.10E %1.10E htg %1.10E \n===========================\n",
htg_diff.x,
htg_diff.y,
htg_diff.z,
-TWOTHIRDS*m_ion*(htg_diff.dot(visc_contrib))
);
}
//
}
}; // was ita_par == 0
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
// Just leaving these but they won't do anything :
//prevpos = opppos;
//prev_v = opp_v;
//opppos = nextpos;
//opp_v = next_v;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
// if (TEST)
// printf("%d ion ownrates %1.8E %1.8E %1.8E ownrates_visc %1.8E %1.8E %1.8E our_v %1.8E %1.8E %1.8E\n",
// iVertex, ownrates.x, ownrates.y, ownrates.z, ownrates_visc.x, ownrates_visc.y, ownrates_visc.z, our_v.vxy.x, our_v.vxy.y, our_v.viz);
ownrates += ownrates_visc;
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NiTi += visc_htg;
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n",iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
{
long izNeighMinor[6];
char szPBC[6];
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
&& (shared_ita_par[threadIdx.x] > 0.0)){
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 omega_ci;
// Let's make life easier and load up an array of 6 n's beforehand.
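// A triangle minor has 6 entries in izNeighMinor: its corner vertices and edge-adjacent
// triangles interleaved, with even i being the vertex neighbours -- which is what the
// i % 2 branches further down assume when deciding where the heating is collected.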
#pragma unroll
for (short i = 0; i < 6; i++)
{
if (TESTIONVISC) printf("start loop %d: ownrates.x %1.9E", i, ownrates_visc.x);
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
// USEFUL:
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qoverMc
}
f64_vec2 gradvx, gradvy, gradviz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
v4 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
if (prev_v.vxy.x == 0.0) // prev is in the insulator.
{
// As above, but drop the insulator point and integrate over the triangle (ours, opp, next) instead of the quadrilateral.
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.y - info.pos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.x - info.pos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.y - info.pos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.x - info.pos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradviz.x = 0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (opp_v.viz + shared_vie[threadIdx.x].viz)*(opppos.y - info.pos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.y = -0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (opp_v.viz + shared_vie[threadIdx.x].viz)*(opppos.x - info.pos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x)
) / area_triangle;
} else {
if (next_v.vxy.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.x = 0.5*(
(prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].viz + opp_v.viz)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.y = -0.5*(
(prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].viz + opp_v.viz)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
} else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNiegh %d \n\n\n\a", iMinor,
izNeighMinor[i]);
};
};
};
};
if (TESTIONVISC) printf("--------------\n%d %d our v: %1.8E %1.8E %1.8E "
"opp v: %1.8E %1.8E %1.8E \n",
iMinor, i, shared_vie[threadIdx.x].vxy.x, shared_vie[threadIdx.x].vxy.y, shared_vie[threadIdx.x].viz,
opp_v.vxy.x, opp_v.vxy.y, opp_v.viz);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_vie[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie[threadIdx.x].viz - opp_v.viz;
} else {
if (TESTIONVISC) printf("side not usable: %d", i);
};
if (bUsableSide) {
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_i*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_i*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
//// if (info.flag == CROSSING_INS) {
//// char flag = p_info_minor[izNeighMinor[i]].flag;
//// if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
//// ownrates_visc += visc_contrib;
////
//// if (TESTIONVISC) printf("UNMAGNETIZED visc_contrib.x %1.9E ownrates %1.9E\n",
//// visc_contrib.x, ownrates_visc.x);
////
//// if (i % 2 == 0) {
//// // vertex : heat collected by vertex
//// }
//// else {
//// visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
////// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
//// // + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
//// // + (our_v.viz - opp_v.viz)*visc_contrib.z);
//// // And we are going to give it to what? Just spread it out after.
////
//// }
//// }
//// else {
//// // DO NOTHING
//// }
//// } else {
ownrates_visc += visc_contrib;
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
}
//if (TESTTRI) {
// printf("iMinor %d %d "
// " ita_par %1.11E nu %1.11E omega %1.9E %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E our vx %1.9E theirs %1.9E\n"
// "gradvy %1.9E %1.9E our vy %1.9E theirs %1.9E\n"
// "gradvz %1.9E %1.9E our vz %1.9E theirs %1.9E\n"
// "visc contrib %1.10E %1.10E %1.10E\n"
// "visc htg %1.10E %1.10E %1.10E | running %1.10E \n"
// " *************************************** \n",
// iMinor, izNeighMinor[i],
// ita_par, // Think nu is what breaks it
// nu, omega_ci.x, omega_ci.y, omega_ci.z,
// gradvx.x, gradvx.y, our_v.vxy.x, opp_v.vxy.x,
// gradvy.x, gradvy.y, our_v.vxy.y, opp_v.vxy.y,
// gradviz.x, gradviz.y, our_v.viz, opp_v.viz,
// visc_contrib.x, visc_contrib.y, visc_contrib.z,
// -THIRD*m_ion*(our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x,
// -THIRD*m_ion*(our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y,
// -THIRD*m_ion*(our_v.viz - opp_v.viz)*visc_contrib.z,
// visc_htg
// );
// printf("iMinor %d visc_contrib.z %1.10E our-opp %1.10E z htg %1.10E | running %1.10E \n"
// " *************************************** \n",
// iMinor, visc_contrib.z, our_v.viz - opp_v.viz,
// -(our_v.viz - opp_v.viz)*THIRD*m_ion*visc_contrib.z,
// visc_htg);
// }
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
//if (TESTTRI)
// printf("iMinor %d %d edge_normal %1.10E %1.10E mag_edge (b,P,H) %1.10E %1.10E %1.10E\n"
// "Pi_b_b %1.10E Pi_b_P %1.10E Pi_b_H %1.10E \n"
// "Pi_P_b %1.10E Pi_P_P %1.10E Pi_P_H %1.10E \n"
// "Pi_H_b %1.10E Pi_H_P %1.10E Pi_H_H %1.10E \n",
// iMinor, izNeighMinor[i], edge_normal.x, edge_normal.y, mag_edge.x, mag_edge.y, mag_edge.z,// b,P,H
// Pi_b_b, Pi_P_b, Pi_H_b,
// Pi_P_b, Pi_P_P, Pi_H_P,
// Pi_H_b, Pi_H_P, Pi_H_H);
}
// Time to double-check carefully the signs.
// Pi was defined with - on dv/dx and we then dot that with the edge_normal, so giving + if we are higher than outside.
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_i*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
if (TESTIONVISC)
printf("%d %d over_m_i %1.9E "
// "unit_b %1.9E %1.9E %1.9E \n"
// "unit_perp %1.9E %1.9E %1.9E \n"
// "unit_Hall %1.9E %1.9E %1.9E \n"
// "momflux_b %1.9E momflux_perp %1.9E momflux_Hall %1.9E\n"
"ita_par %1.10E visc_contrib.x %1.10E \n",
iMinor, izNeighMinor[i], over_m_i,
// unit_b.x, unit_b.y, unit_b.z,
// unit_perp.x, unit_perp.y, unit_perp.z,
// unit_Hall.x, unit_Hall.y, unit_Hall.z,
// momflux_b, momflux_perp, momflux_Hall,
ita_par, visc_contrib.x );
ownrates_visc += visc_contrib;
if (i % 2 != 0) // not vertex
{
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
if (TESTIONVISC)
printf("%d %d visc_htg %1.10E\n", iMinor,i, -THIRD*m_ion*(htg_diff.dot(visc_contrib)));
}
}
}; // bUsableSide
// endpt0 = endpt1;
// prevpos = opppos;
// prev_v = opp_v;
// opppos = nextpos;
// opp_v = next_v;
};
f64_vec3 ownrates;
memcpy(&ownrates,&(p_MAR_ion[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_ion[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NiTi += visc_htg;
// Barking mad: we never yet made special allowance for the case where a prev point is in the insulator.
//___________________________________________________________________________________________
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
} else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
}
else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len; // ?!
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
#pragma unroll
for (int i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
f64_vec3 omega_ce;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qovermc
}
f64_vec2 gradvx, gradvy, gradvez;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (ita_par > 0.0)
{
v4 prev_v, next_v, opp_v;
f64_vec2 prevpos, nextpos, opppos;
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
// All same as ion here:
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(shared_vie_verts[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (prev_v.vez + shared_vie_verts[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(shared_vie_verts[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (prev_v.vez + shared_vie_verts[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTVISC) printf("%d our v %1.8E %1.8E %1.8E oppv %1.8E %1.8E %1.8E \n",
izTri[i],
shared_vie_verts[threadIdx.x].vxy.x, shared_vie_verts[threadIdx.x].vxy.y,
shared_vie_verts[threadIdx.x].vez, opp_v.vxy.x, opp_v.vxy.y, opp_v.vez);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_vie_verts[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie_verts[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie_verts[threadIdx.x].vez - opp_v.vez;
}
// f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
// Let's suppose Pi_yx means the rate of flow of y-momentum in the x direction.
// Then the y-momentum flowing through the wall is
// Pi_yx*edge_normal.x + Pi_yy*edge_normal.y -- reasonable.
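// These match Pi = -ita*(grad v + (grad v)^T - (2/3) I div v) with d/dz = 0, keeping only the
// components needed against a 2D edge normal:
// Pi_xx = -ita*((4/3)dvx/dx - (2/3)dvy/dy), Pi_xy = Pi_yx = -ita*(dvx/dy + dvy/dx),
// Pi_yy = -ita*((4/3)dvy/dy - (2/3)dvx/dx), Pi_zx = -ita*dvez/dx, Pi_zy = -ita*dvez/dy.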
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_e*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_e*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_e*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
// if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
//
// visc_htg += -TWOTHIRDS*m_e*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
// }
// else {
// visc_contrib.x = 0.0; visc_contrib.y = 0.0; visc_contrib.z = 0.0;
// }
// } else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
};
// The alternative, which may or may not run faster, is to test for ita == 0 before doing all the calculations:
// set ita == 0 in every place we should not look, including OUTERMOST, and do no traffic to or from it.
//
if (0) // (TESTVISC)
printf("iVertex %d tri %d ELEC ita_par %1.9E own ita %1.9E\n"
"gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvez %1.8E %1.8E\n"
"edgenormal %1.8E %1.8E\n"
"Pi_xx %1.8E xy %1.8E yy %1.8E zx %1.8E\n"
"visc_contrib %1.9E %1.9E %1.9E \n"
"htg cum %1.9E heating %1.9E \n"
"===\n",
iVertex, izTri[i], ita_par, shared_ita_par_verts[threadIdx.x],
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvez.x, gradvez.y,
edge_normal.x, edge_normal.y,
Pi_xx, Pi_xy, Pi_yy, Pi_zx,
visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg,
-TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib))
);
// -= !!!
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
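// Same construction of W in the (b, perp, Hall) frame as in the ion sections above,
// with gradvez supplying the z-row of the velocity gradient.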
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y; // b component
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y; // P component
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y; // H component
// verify for chosen edge that we obtained a 3-vector of the same length as the original edge!
// Tick
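// (Length is preserved automatically: edge_normal has zero z-component, so these three numbers
// are exactly its coordinates in the orthonormal (b, perp, Hall) basis.)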
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
f64_vec3 visc_contrib;
visc_contrib.x = over_m_e*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_e*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_e*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
// visc_htg += -TWOTHIRDS*m_e*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
// }
//}else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
};
if (TESTVISC) {
// Most efficient way: compute mom flux in magnetic coords
printf("iVertex %d MAGNETIZED elec: visc contrib %1.8E %1.8E %1.8E\n"
"htg cum %1.9E visc htg %1.9E ita_par %1.9E \n"
"gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvez %1.8E %1.8E\n"
"unit_b %1.8E %1.8E %1.8E unit_perp %1.8E %1.8E %1.8E unit_H %1.8E %1.8E %1.8E\n"
"omega_ce %1.8E %1.8E %1.8E mod %1.8E nu %1.8E \n"
"===\n",
iVertex, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg, -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib)),
ita_par,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvez.x, gradvez.y,
unit_b.x, unit_b.y, unit_b.z, unit_perp.x, unit_perp.y, unit_perp.z, unit_Hall.x, unit_Hall.y, unit_Hall.z,
omega_ce.x, omega_ce.y, omega_ce.z, omega_ce.modulus(), nu);
}
//
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}
}; // ita_par > 0.0
// endpt0 = endpt1;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
//
if (TESTVISC)
printf("iVertex %d ownrates %1.8E %1.8E %1.8E ownrates_visc %1.8E %1.8E %1.8E htg %1.8E \n",
iVertex, ownrates.x, ownrates.y, ownrates.z, ownrates_visc.x, ownrates_visc.y, ownrates_visc.z, visc_htg);
ownrates += ownrates_visc;
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
if (TESTVISC) printf("iVertex %d NeTe recorded %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NeTe);
p_NT_addition_rate[iVertex].NeTe += visc_htg;
if (TESTVISC) printf("iVertex %d NeTe recorded %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NeTe);
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex e %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex e %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex e %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex e %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
}
f64_vec2 gradvez, gradvx, gradvy;
f64_vec2 edge_normal; // a reason why storing position > loading.
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
v4 opp_v, next_v, prev_v;
f64_vec2 opppos, nextpos, prevpos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
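	// The gradients below are Green's-theorem estimates over the quadrilateral
	// (prev, ours, next, opp): grad v ~ (1/A) * (contour integral of v n dl),
	// with each side evaluated by the trapezium rule; area_quadrilateral is the
	// matching shoelace area.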
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
if (prev_v.vxy.x == 0.0) // prev is in the insulator.
{
// do like the above but it goes (ours, next, opp) somehow?
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.y - info.pos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.x - info.pos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.y - info.pos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.x - info.pos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradvez.x = 0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (opp_v.vez + shared_vie[threadIdx.x].vez)*(opppos.y - info.pos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.y = -0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (opp_v.vez + shared_vie[threadIdx.x].vez)*(opppos.x - info.pos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x)
) / area_triangle;
}
else {
if (next_v.vxy.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.x = 0.5*(
(prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vez + opp_v.vez)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.y = -0.5*(
(prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vez + opp_v.vez)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
} else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNeigh %d \n\n\n\a", iMinor,
izNeighMinor[i]);
};
};
};
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
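	// The factor THIRD arises because the edge endpoints are triangle centroids,
	// endpt0 = (info.pos + prevpos + opppos)/3 and endpt1 = (info.pos + nextpos + opppos)/3,
	// so endpt1 - endpt0 = (nextpos - prevpos)/3 and edge_normal is that vector rotated 90 degrees.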
if (TEST_ELEC_VISC_TRI) printf("%d prev_v %1.14E opp_v %1.14E next_v %1.14E our_v %1.14E omega %1.8E %1.8E\n",
iMinor, prev_v.vez, opp_v.vez, next_v.vez, shared_vie[threadIdx.x].vez,
omega_ce.x, omega_ce.y);
htg_diff.x = shared_vie[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie[threadIdx.x].vez - opp_v.vez;
}
// Wouldn't it be nice if we could now drop all our prev_v variables and pick them up again on the next
// go around?
// That's really what I think we need.
if (bUsableSide)
{
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.1*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
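	// Unmagnetised limit: Pi = -ita_par * W with W_xx = (4/3)dvx/dx - (2/3)dvy/dy,
	// W_xy = dvx/dy + dvy/dx, and Pi_zx = -ita_par dvez/dx, Pi_zy = -ita_par dvez/dy
	// for the out-of-plane momentum; this simply restates the six lines above.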
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_e*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_e*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_e*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
ownrates_visc += visc_contrib;
if (i % 2 != 0)
visc_htg += -THIRD*m_e*(htg_diff.dot(visc_contrib));
if (TESTVISC)
printf("\n%d : %d : ita %1.8E gradvz %1.9E %1.9E ourpos %1.9E %1.9E visc_contrib.z %1.10E visc_htg %1.10E\n",
iMinor, izNeighMinor[i], ita_par,
gradvez.x,gradvez.y, info.pos.x,info.pos.y,
visc_contrib.z, visc_htg);
// 42939: Find out why it makes too much heat. Probably a compound error.
// if (iMinor == 42939) printf("42939\nour_v %1.8E %1.8E %1.8E \n"
// "opp_v %1.8E %1.8E %1.8E \n"
// "visc_contrib %1.8E %1.8E %1.8E \n",
// our_v.vxy.x, our_v.vxy.y, our_v.vez,
// opp_v.vxy.x, opp_v.vxy.y, opp_v.vez,
// visc_contrib.x, visc_contrib.y, visc_contrib.z);
//
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
// f64_vec2 edge_normal;
// edge_normal.x = THIRD * (nextpos.y - prevpos.y);
// edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// f64_vec2 edge_normal;
// edge_normal.x = THIRD * (nextpos.y - prevpos.y);
// edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_e*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_e*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_e*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
ownrates_visc += visc_contrib;
if (TEST_ELEC_VISC_TRI) printf(
"%d ownrates_visc.z %1.14E visc_contrib.z %1.14E 1/m_e %1.14E\n"
"unit_b.z %1.14E unit_perp.z %1.14E unit_Hall.z %1.14E\n"
"momflux b perp Hall %1.14E %1.14E %1.14E gradvez %1.14E %1.14E\n",
iMinor, ownrates_visc.z, visc_contrib.z, over_m_e, unit_b.z,
unit_perp.z, unit_Hall.z, momflux_b, momflux_perp, momflux_Hall,
gradvez.x, gradvez.y);
if (i % 2 != 0)
visc_htg += -THIRD*m_e*(htg_diff.dot(visc_contrib));
}
}; // bUsableSide
// endpt0 = endpt1;
};
f64_vec3 ownrates;
memcpy(&(ownrates), &(p_MAR_elec[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_elec[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NeTe += visc_htg;
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor e %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor e %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor e %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor e %d NAN VISC HTG\n", iMinor);
#endif
if (TESTVISC) {
if (ownrates.x != ownrates.x)
printf("iMinor e %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor e %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor e %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor e %d NAN VISC HTG\n", iMinor);
}
} else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
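// ---------------------------------------------------------------------------------
// Illustrative sketch only -- not part of the original routine and not called from
// anywhere; the function name is hypothetical. It restates, verbatim, the mapping
// from rate-of-strain components W_.. to stress components Pi_.. used in the
// magnetised branch of the electron viscosity kernel above, pulled out as a free
// function purely so the algebra can be read in one place.
__device__ __forceinline__ void Sketch_MagnetisedStressFromW(
	f64 ita_par, f64 nu, f64 omegamod,
	f64 W_bb, f64 W_bP, f64 W_bH, f64 W_PP, f64 W_PH, f64 W_HH,
	f64 * pPi_b_b, f64 * pPi_P_b, f64 * pPi_P_P,
	f64 * pPi_H_b, f64 * pPi_H_P, f64 * pPi_H_H)
{
	// The four magnetised coefficients derived from the parallel viscosity ita_par:
	f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
	f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
	f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
	f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
	// Stress components in the (b, perp, Hall) basis, exactly as accumulated in-kernel:
	*pPi_b_b = -ita_par*W_bb;
	*pPi_P_P = -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH - ita_3*W_PH;
	*pPi_H_H = -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP + ita_3*W_PH;
	*pPi_H_P = -ita_1*W_PH + 0.5*ita_3*(W_PP - W_HH);
	*pPi_P_b = -ita_2*W_bP - ita_4*W_bH;
	*pPi_H_b = -ita_2*W_bH + ita_4*W_bP;
}
// ---------------------------------------------------------------------------------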
// Neutral routine:
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful when threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: in each loop we want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.5 :
if ((info.flag == DOMAIN_VERTEX) && (info.pos.modulus() < 4.5)
&& (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
{
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
if (ita_par > 0.0) // note it was the minimum taken.
{
f64_vec3 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
gradvx.x = 0.5*(
(shared_v_n_verts[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + shared_v_n_verts[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_v_n_verts[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + shared_v_n_verts[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_v_n_verts[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + shared_v_n_verts[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_v_n_verts[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + shared_v_n_verts[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
//
// if (TEST) printf(
// "iVertex %d our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E\n"
// "area_quad %1.8E \n"
// "info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
// iVertex, our_v.vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
// area_quadrilateral,
// info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
//
gradvz.x = 0.5*(
(shared_v_n_verts[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + shared_v_n_verts[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.y = -0.5*(
(shared_v_n_verts[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + shared_v_n_verts[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"gradvx %1.9E %1.9E gradvy %1.9E %1.9E gradvz %1.9E %1.9E \n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E edge_nor %1.9E %1.9E\n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvz.x, gradvz.y,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
edge_normal.x, edge_normal.y);
}
// Order of calculations may help things to go out/into scope at the right times so careful with that.
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
if (ita_par > 0.0)
{
// For neutral fluid viscosity does not involve dimensional transfers.
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
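	// Neutral viscosity is isotropic: the flux of each velocity component through this
	// edge is ita_par * grad(v_component) . edge_normal. As for the charged species,
	// we divide by the mass only; ownrates is divided by N later to give dv/dt.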
// if (iVertex == VERTCHOSEN) {
// printf("visc_contrib %1.9E %1.9E %1.9E ita %1.10E \n",
// visc_contrib.x, visc_contrib.y, visc_contrib.z, ita_par);
// }
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
}
else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) &&
(info.pos.modulus() < 4.9) && (shared_ita_par[threadIdx.x] > 0.0)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (short i = 0; i < 6; i++)
{
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
}
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
f64_vec2 gradvx, gradvy, gradvz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
f64_vec3 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.x = 0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.y = -0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTNEUTVISC2) {
printf("%d i %d prev_v %1.10E our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
gradvx.x = 0.0;
gradvx.y = 0.0;
gradvy.x = 0.0;
gradvy.y = 0.0;
gradvz.x = 0.0;
gradvz.y = 0.0;
bUsableSide = false;
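	// For an edge between two CROSSING_INS triangles we zero the gradients and mark
	// the side unusable, so no viscous flux is exchanged across the insulator;
	// the older triangle-based fallback is kept commented out below.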
/*
if (prev_v.x == 0.0) // prev is in the insulator. ---- this seems like a dodgy way of trying to know this.
{
// do like the above but it goes (ours, next, opp) somehow?
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (opp_v.x + shared_v_n[threadIdx.x].x)*(opppos.y - info.pos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (opp_v.x + shared_v_n[threadIdx.x].x)*(opppos.x - info.pos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (opp_v.y + shared_v_n[threadIdx.x].y)*(opppos.y - info.pos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (opp_v.y + shared_v_n[threadIdx.x].y)*(opppos.x - info.pos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradvz.x = 0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (opp_v.z + shared_v_n[threadIdx.x].z)*(opppos.y - info.pos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvz.y = -0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (opp_v.z + shared_v_n[threadIdx.x].z)*(opppos.x - info.pos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x)
) / area_triangle;
if (TESTNEUTVISC2) {
printf("%d i %d PREVV=0 our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
}
else {
if (next_v.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].x + opp_v.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].x + opp_v.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].y + opp_v.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].y + opp_v.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
if (TESTNEUTVISC2) {
printf("%d i %d NEXTV=0 our_v %1.10E opp_v %1.10E prev_v %1.10E\n",
iMinor, i, shared_v_n[threadIdx.x].y, opp_v.y, prev_v.y);
};
gradvz.x = 0.5*(
(prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].z + opp_v.z)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvz.y = -0.5*(
(prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].z + opp_v.z)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
}
else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNiegh %d \n"
"izNeighMinor[inext] %d izNeighMinor[iprev] %d flag %d %d \n"
"prev_v.x %1.8E next_v.x %1.8E \n"
"\n\n\a", iMinor,
izNeighMinor[i],
izNeighMinor[inext], izNeighMinor[iprev], p_info_minor[izNeighMinor[inext]].flag,
p_info_minor[izNeighMinor[iprev]].flag, prev_v.x, next_v.x);
};
};
*/
};
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_v_n[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n[threadIdx.x].z - opp_v.z;
// if (iMinor == CHOSEN) printf("============================\nNeutral viscosity %d %d\n"
// "v.x %1.9E opp_v.x %1.9E prev_v.x %1.9E next_v.x %1.9E\n"
// "ourpos %1.9E %1.9E \n"
// "prevpos %1.9E %1.9E \n"
// "opppos %1.9E %1.9E \n"
// "nextpos %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E gradvy %1.9E %1.9E edge_nor %1.9E %1.9E\n",
// iMinor, izNeighMinor[i],
// shared_v_n[threadIdx.x].x, opp_v.x, prev_v.x, next_v.x,
// info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
// gradvx.x, gradvx.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y);
//
};
if (bUsableSide) {
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*ita_par*gradvx.dot(edge_normal);
visc_contrib.y = over_m_n*ita_par*gradvy.dot(edge_normal);
visc_contrib.z = over_m_n*ita_par*gradvz.dot(edge_normal);
// Set to 0 any that are pushing momentum uphill. For neutral this is unphysical.
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it'll ruin backward solve.
ownrates_visc += visc_contrib;
if (TESTNEUTVISC2) {
printf("%d i %d contrib.y %1.10E gradvy %1.10E %1.10E edge_nml %1.9E %1.9E ita %1.8E /m_n %1.8E cumu %1.9E\n",
iMinor, i, visc_contrib.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y, ita_par, over_m_n, ownrates_visc.y);
};
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib)); // m_n for neutrals, matching the vertex loop above
};
}; // bUsableSide
};
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NnTn += visc_htg;
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
__global__ void kernelExpandSelectFlagIta(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
int * __restrict__ p_iSelectFlag,
int * __restrict__ p_iSelectflagNeut,
int const number
) {
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful when threadIdx.x < threadsPerTileMajor
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
long izTri[MAXNEIGH_d];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
if (p_iSelectFlag[iVertex + BEGINNING_OF_CENTRAL] == 0) {
bool found = false;
for (short i = 0; ((i < tri_len) && (found == false)); i++)
{
if (p_iSelectFlag[izTri[i]] == number) {
found = true;
};
};
if (found) p_iSelectFlag[iVertex + BEGINNING_OF_CENTRAL] = number + 1;
};
if (p_iSelectflagNeut[iVertex + BEGINNING_OF_CENTRAL] == 0) {
bool found = false;
for (short i = 0; ((i < tri_len) && (found == false)); i++)
{
if (p_iSelectflagNeut[izTri[i]] == number) {
found = true;
};
};
if (found) p_iSelectflagNeut[iVertex + BEGINNING_OF_CENTRAL] = number + 1;
};
};
info = p_info_minor[iMinor];
long izNeighMinor[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
if (p_iSelectFlag[iMinor] == 0) {
bool found = false;
for (short i = 0; ((i < 6) && (found == false)); i++)
{
if (p_iSelectFlag[izNeighMinor[i]] == number) {
found = true;
};
};
if (found) p_iSelectFlag[iMinor] = number + 1;
};
if (p_iSelectflagNeut[iMinor] == 0) {
bool found = false;
for (short i = 0; ((i < 6) && (found == false)); i++)
{
if (p_iSelectflagNeut[izNeighMinor[i]] == number) {
found = true;
};
};
if (found) p_iSelectflagNeut[iMinor] = number + 1;
};
}
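// A possible host-side usage sketch for the kernel above (assumed grid-size name
// "numTilesMinor" and ring count "iRings"; neither name comes from this file).
// Each call grows the selected regions by one ring: any minor cell or vertex
// adjacent to a cell flagged 'ring' becomes flagged 'ring + 1'.
//
//	for (int ring = 1; ring <= iRings; ring++)
//		kernelExpandSelectFlagIta <<< numTilesMinor, threadsPerTileMinor >>> (
//			p_info_minor, p_izTri, p_izNeighMinor,
//			p_iSelectFlag, p_iSelectflagNeut, ring);
//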
// Neutral routine:
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT_Geometric(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri,
int * __restrict__ p_Select)
{
// ************************************************************************
// *********** WATCH OUT ************************************************
// ************************************************************************
// A copy of this routine with fixed flows only is in heatflux.cu
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful when threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: in each loop we want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.9 :
// !
if ((info.flag == DOMAIN_VERTEX)
// && (info.pos.modulus() < 4.9) -- if we have this then need in d/dbeta also.
&& (shared_ita_par_verts[threadIdx.x] > 0.0)
&& (p_Select[iVertex + BEGINNING_OF_CENTRAL] != 0)
)
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
// Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
if (p_Select[izTri[i]] != 0) {
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
if (ita_par > 0.0) // note it was the minimum taken.
{
f64_vec3 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
edge_normal = ReconstructEdgeNormal(prevpos, info.pos, nextpos, opppos);
#ifdef INS_INS_3POINT
if (TestDomainPos(prevpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
}
else {
if (TestDomainPos(nextpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, opp_v.z
);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
};
};
#else
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
{
// One of the sides is dipped under the insulator -- set transverse deriv to 0.
// Bear in mind we are looking from a vertex into a tri, it can be ins tri.
gradvx = (opp_v.x - shared_v_n_verts[threadIdx.x].x)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvy = (opp_v.y - shared_v_n_verts[threadIdx.x].y)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvz = (opp_v.z - shared_v_n_verts[threadIdx.x].z)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
// Could switch to a single function that computes all three gradients in one call.
};
// Simplify:
#endif
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"gradvx %1.9E %1.9E gradvy %1.9E %1.9E gradvz %1.9E %1.9E \n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E edge_nor %1.9E %1.9E\n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvz.x, gradvz.y,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
edge_normal.x, edge_normal.y);
}
// Order of calculations may help things go out of / into scope at the right times, so be careful with that.
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
if (ita_par > 0.0)
{
// For neutral fluid viscosity does not involve dimensional transfers.
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
// if (iVertex == VERTCHOSEN) {
// printf("visc_contrib %1.9E %1.9E %1.9E ita %1.10E \n",
// visc_contrib.x, visc_contrib.y, visc_contrib.z, ita_par);
// }
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}; // p_Select
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
}
else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
visc_htg = 0.0;
#else
f64 visc_htg0, visc_htg1, visc_htg2;
visc_htg0 = 0.0;
visc_htg1 = 0.0;
visc_htg2 = 0.0;
#endif
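// With COLLECT_VISC_HTG_IN_TRIANGLES, edge heating is accumulated in a single visc_htg and added to
// p_NT_addition_tri[iMinor]; otherwise each edge's heat is split 50/50 between two of the three per-corner
// accumulators visc_htg0..2 and written to p_NT_addition_tri[iMinor*3 + corner] at the end of the loop.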
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
// && (info.pos.modulus() < 4.9) // if we have this then we have to have it in d/dbeta routine also.
&& (shared_ita_par[threadIdx.x] > 0.0)
&& (p_Select[iMinor] != 0)
) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (short i = 0; i < 6; i++)
{
if (p_Select[izNeighMinor[i]] != 0) {
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
}
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
bool bLongi = false;
#ifdef INS_INS_NONE
// Get rid of ins-ins triangle traffic:
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS)
bUsableSide = 0;
}
// if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
// bLongi = true;
// have to put it below
#else
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS)
bLongi = true;
}
#endif
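// Edges between two CROSSING_INS triangles: with INS_INS_NONE the edge is switched off (bUsableSide = 0);
// otherwise bLongi is set so the edge normal is rebuilt above the insulator via ReconstructEdgeNormal below.
// Independently, INS_INS_3POINT swaps the 4-point gradient for a 3-point one whenever prevpos or nextpos
// lies outside the domain.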
f64_vec2 gradvx, gradvy, gradvz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
f64_vec3 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
};
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
bLongi = true;
#ifdef INS_INS_3POINT
if (TestDomainPos(prevpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
}
else {
if (TestDomainPos(nextpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, opp_v.z
);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
};
};
#else
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
{
// One of the sides is dipped under the insulator -- set transverse deriv to 0.
// Bear in mind we are looking from a vertex into a tri, it can be ins tri.
gradvx = (opp_v.x - shared_v_n[threadIdx.x].x)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvy = (opp_v.y - shared_v_n[threadIdx.x].y)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvz = (opp_v.z - shared_v_n[threadIdx.x].z)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
}
#endif
#ifdef INS_INS_NONE
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// just set it to 0.
bUsableSide = false;
gradvz.x = 0.0;
gradvz.y = 0.0;
gradvx.x = 0.0;
gradvx.y = 0.0;
gradvy.x = 0.0;
gradvy.y = 0.0;
};
};
#endif
htg_diff = shared_v_n[threadIdx.x] - opp_v;
if (TESTNEUTVISC2) {
printf("%d i %d prev_v %1.10E our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
// if (iMinor == CHOSEN) printf("============================\nNeutral viscosity %d %d\n"
// "v.x %1.9E opp_v.x %1.9E prev_v.x %1.9E next_v.x %1.9E\n"
// "ourpos %1.9E %1.9E \n"
// "prevpos %1.9E %1.9E \n"
// "opppos %1.9E %1.9E \n"
// "nextpos %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E gradvy %1.9E %1.9E edge_nor %1.9E %1.9E\n",
// iMinor, izNeighMinor[i],
// shared_v_n[threadIdx.x].x, opp_v.x, prev_v.x, next_v.x,
// info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
// gradvx.x, gradvx.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y);
//
if (bLongi) {
// move any edge_normal endpoints that are below the insulator,
// until they are above the insulator.
edge_normal = ReconstructEdgeNormal(
prevpos, info.pos, nextpos, opppos
);
};
};
if (bUsableSide) {
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*ita_par*gradvx.dot(edge_normal);
visc_contrib.y = over_m_n*ita_par*gradvy.dot(edge_normal);
visc_contrib.z = over_m_n*ita_par*gradvz.dot(edge_normal);
// Set to 0 any that are pushing momentum uphill. For neutral this is unphysical.
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it'll ruin backward solve.
ownrates_visc += visc_contrib;
if (TESTNEUTVISC2) {
printf("%d i %d contrib.y %1.10E gradvy %1.10E %1.10E edge_nml %1.9E %1.9E ita %1.8E /m_n %1.8E cumu %1.9E\n",
iMinor, i, visc_contrib.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y, ita_par, over_m_n, ownrates_visc.y);
};
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib)); // neutral mass m_n, as in the vertex loop above
#else
f64 heat_addn = -THIRD*m_n*(htg_diff.dot(visc_contrib)); // neutral mass m_n
if (i == 1) {
visc_htg0 += 0.5*heat_addn;
visc_htg1 += 0.5*heat_addn;
}
else {
if (i == 3) {
visc_htg1 += 0.5*heat_addn;
visc_htg2 += 0.5*heat_addn;
}
else {
visc_htg0 += 0.5*heat_addn;
visc_htg2 += 0.5*heat_addn;
};
};
#endif
};
}; // bUsableSide
}; // p_Select
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
p_NT_addition_tri[iMinor].NnTn += visc_htg;
#else
p_NT_addition_tri[iMinor * 3 + 0].NnTn += visc_htg0;
p_NT_addition_tri[iMinor * 3 + 1].NnTn += visc_htg1;
p_NT_addition_tri[iMinor * 3 + 2].NnTn += visc_htg2;
#endif
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
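// Symmetrized ("SYMM") variant of the neutral viscosity routine above: rather than reconstructing 2D velocity
// gradients, the flux through each Voronoi edge is approximated as ita * |cc1 - cc0| * (v_opp - v_ours) / |opppos - ourpos|,
// i.e. edge length times the normal derivative, so the two cells sharing an edge should exchange equal and
// opposite momentum.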
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT_SYMM(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful for threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But for now we set ita == 0 in OUTERMOST and agree never to look there, since that is fairer than one-way traffic and saves handling OUTERMOST specially.
// We could handle it, doing flows only if the flag does not come up OUTER_FRILL.
// OK, just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
f64_vec2 cc0, cc1;
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.5 :
if ((info.flag == DOMAIN_VERTEX) && (info.pos.modulus() < 4.5)
&& (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
short i = 0;
f64_vec3 opp_v;// , prev_v, next_v; // never used
f64_vec2 opppos, prevpos, nextpos;
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
CalculateCircumcenter(&cc0, info.pos, opppos, prevpos);
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
} else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
} else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
// Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
// f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
// f64_vec2 edge_normal;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
CalculateCircumcenter(&cc1, opppos, info.pos, nextpos);
if (ita_par > 0.0) // note it was the minimum taken.
{
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
} else {
opp_v = p_v_n_minor[izTri[i]];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
RotateAnticlockwise(opp_v);
}
// BEWARE OF WHEN EDGE ISN'T CLOCKWISE ORDERED -- WE TAKE NEGATIVE AREA as well?
f64_vec3 deriv = (opp_v - shared_v_n_verts[threadIdx.x]) / (opppos - info.pos).modulus();
f64_vec3 visc_contrib = over_m_n*ita_par*deriv*(cc1 - cc0).modulus();
// there is an unnecessary sqrt but remember, sqrt is still cheaper than a divide.
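// i.e. visc_contrib = (ita/m_n) * |cc1 - cc0| * (v_opp - v_ours)/|opppos - ourpos| : Voronoi edge length times
// the normal derivative, the symmetric approximation of the viscous momentum flux across this edge.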
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E \n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y);
// For neutral fluid viscosity does not involve dimensional transfers.
//f64_vec3 visc_contrib;
//visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
//visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
//visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
ownrates_visc += visc_contrib;
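// Heating contribution from this edge: -(1/3) m_n (v_ours - v_opp) . visc_contrib, accumulated into NnTn.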
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
cc0 = cc1;
prevpos = opppos;
opppos = nextpos;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
f64_vec3 opp_v;
f64_vec2 prevpos, nextpos, opppos;
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) &&
(info.pos.modulus() < 4.9) && (shared_ita_par[threadIdx.x] > 0.0)){
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
short i = 0;
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
CalculateCircumcenter(&cc0, info.pos, opppos, prevpos);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
};
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
f64_vec3 htg_diff;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
CalculateCircumcenter(&cc1, info.pos, nextpos, opppos);
if (bUsableSide) {
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
}
else {
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
RotateAnticlockwise(opp_v);
}
htg_diff.x = shared_v_n[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n[threadIdx.x].z - opp_v.z;
f64_vec3 deriv = (opp_v - shared_v_n[threadIdx.x]) / (opppos - info.pos).modulus();
f64_vec3 visc_contrib = over_m_n*ita_par*(cc1 - cc0).modulus()*deriv;
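// Same symmetric edge flux as the vertex loop above: (ita/m_n) * |cc1 - cc0| * normal derivative.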
// Set to 0 any that are pushing momentum uphill. For neutral this is unphysical.
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it'll ruin backward solve.
ownrates_visc += visc_contrib;
if (0)//iMinor == CHOSEN)
printf("============================\nNeutral viscosity %d %d \n"
"v.z %1.9E opp_v.z %1.9E ita_par %1.9E edgelen %1.9E dist_out %1.9E \n"
"ourpos %1.9E %1.9E prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpos %1.9E %1.9E \n"
"deriv.z %1.9E visc_contrib.z %1.9E ownrates.z %1.9E cc0 %1.8E %1.8E cc1 %1.8E %1.8E\n",
iMinor, izNeighMinor[i],
shared_v_n[threadIdx.x].z, opp_v.z, ita_par,
(cc1 - cc0).modulus(), (opppos - info.pos).modulus(),
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
deriv.z, visc_contrib.z, ownrates_visc.z, cc0.x,cc0.y, cc1.x, cc1.y
);
if (i % 2 == 0) {
// vertex : heat collected by vertex
} else {
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib)); // neutral mass m_n, as in the vertex loop
};
}; // bUsableSide
cc0 = cc1;
prevpos = opppos;
opppos = nextpos;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NnTn += visc_htg;
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
/*
__global__ void kernelNeutral_pressure_and_momflux(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
T3 * __restrict__ p_T_minor,
f64_vec3 * __restrict__ p_v_n_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_MAR_neut
)
{
This routine is missing the changes to handle insulator tris, and should not set vr = 0.
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_Tn[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Tn_verts[threadsPerTileMajor]; // 1/2( 13+3+2+2+1 = 21) = 10.5 => total 18.5 per minor thread.
// shame we couldn't get down to 16 per minor thread, and if we could then that might be better even if we load on-the-fly something.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
shared_Tn[threadIdx.x] = p_T_minor[iMinor].Tn; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_Tn_verts[threadIdx.x] = p_T_minor[iVertex + BEGINNING_OF_CENTRAL].Tn;
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2));
shared_Tn_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
f64_vec3 our_v, opp_v, prev_v, next_v;
f64 oppT, prevT, nextT, ourT;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_v_n_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
ourT = shared_Tn_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_Tn[izTri[iprev] - StartMinor];
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT = prev_T.Tn;
prev_v = p_v_n_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_Tn[izTri[i] - StartMinor];
opp_v = shared_v_n[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
T3 opp_T = p_T_minor[izTri[i]];
oppT = opp_T.Tn;
opp_v = p_v_n_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
f64_vec2 projendpt0;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
} else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_Tn[izTri[inext] - StartMinor];
next_v = shared_v_n[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT = next_T.Tn;
next_v = p_v_n_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64 T0, T1;
T0 = THIRD*(prevT + ourT + oppT);
T1 = THIRD*(nextT + ourT + oppT);
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + opp_v + next_v);
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// CHANGES 20th August 2019
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
if (relvnormal < 0.0)
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v - our_v);
// Note: minus a minus so correct sign
// And we did what? We took n at centre of a triangle WITHIN this major cell
// But did not take upwind n ---- is that consistent for all advection?
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevT = oppT;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
oppT = nextT;
opp_v_overall = next_v_overall;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
our_v = shared_v_n[threadIdx.x];
ourT = shared_Tn[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
} else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevT = shared_Tn[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_Tn_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
prevT = p_T_minor[izNeighMinor[iprev]].Tn;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
oppT = shared_Tn[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_Tn_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT = opp_T.Tn;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
nextT = shared_Tn[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_Tn_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
nextT = p_T_minor[izNeighMinor[inext]].Tn;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + next_v + opp_v);
//if (((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
{ // Decided not to add test
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// CHANGES 20th August 2019:
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
if (relvnormal < 0.0)
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v - our_v);
f64 T0 = THIRD*(ourT + prevT + oppT);
f64 T1 = THIRD*(ourT + nextT + oppT);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// do nothing
}
else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
// Just for sanity for now, let's just set our own n,T for the edge:
n0 = p_n_minor[iMinor].n_n;
n1 = p_n_minor[iMinor].n_n;
T0 = ourT;
T1 = ourT;
}
}
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
}
endpt0 = endpt1;
prevT = oppT;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
oppT = nextT;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
if (info.flag == CROSSING_INS) {
// In this case set v_r = 0 and set a_TP_r = 0 and dv/dt _r = 0 in general
//f64_vec2 rhat = info.pos / info.pos.modulus();
MAR_neut -= Make3(
(MAR_neut.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
// no
// Hmm
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}*/
__global__ void kernelNeutral_pressure(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
T3 * __restrict__ p_T_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
bool * __restrict__ bz_pressureflag,
f64_vec3 * __restrict__ p_MAR_neut
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_Tn[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Tn_verts[threadsPerTileMajor]; // 1/2( 13+3+2+2+1 = 21) = 10.5 => total 18.5 per minor thread.
// A shame we couldn't get down to 16 per minor thread; if we could, that might be better even if we had to load something on the fly.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_Tn[threadIdx.x] = p_T_minor[iMinor].Tn; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Tn_verts[threadIdx.x] = p_T_minor[iVertex + BEGINNING_OF_CENTRAL].Tn;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 oppT, prevT, nextT, ourT;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourT = shared_Tn_verts[threadIdx.x];
bool bPressure = bz_pressureflag[iVertex];
if (bPressure) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_Tn[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT = prev_T.Tn;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_Tn[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
T3 opp_T = p_T_minor[izTri[i]];
oppT = opp_T.Tn;
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_Tn[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT = next_T.Tn;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64 T0, T1;
T0 = THIRD*(prevT + ourT + oppT);
T1 = THIRD*(nextT + ourT + oppT);
// And we did what? We took n at centre of a triangle WITHIN this major cell
// But did not take upwind n ---- is that consistent for all advection?
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
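// This is the pressure surface integral for this edge: a trapezoid estimate of
// -(integral) n T nhat dl, using the endpoint values n0*T0 and n1*T1, with edge_normal
// carrying the edge length; over_m_n (presumably 1/m_n) turns the force into the rate
// of change of N*v that MAR_neut accumulates.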
// AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTVNXVERT) {
printf("iVertex %d %d iTri %d : contrib.x %1.8E n01 %1.8E %1.8E T01 %1.8E %1.8E oppT %1.8E cumu %1.8E\n",
iVertex, i, izTri[i], 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.x,
n0, n1, T0, T1, oppT, MAR_neut.x);
}
if (TESTVNYVERT) {
printf("iVertex %d %d iTri %d : contrib.y %1.8E n01 %1.8E %1.8E T01 %1.8E %1.8E oppT %1.8E cumu %1.8E\n",
iVertex, i, izTri[i], 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, oppT, MAR_neut.y);
}
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevT = oppT;
opppos = nextpos;
oppT = nextT;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the triangle-minor part:
info = p_info_minor[iMinor];
ourT = shared_Tn[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
} else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevT = shared_Tn[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_Tn_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
prevT = p_T_minor[izNeighMinor[iprev]].Tn;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
oppT = shared_Tn[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_Tn_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT = opp_T.Tn;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
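// (A compact restatement of this per-corner load is sketched as a hypothetical helper just after this kernel.)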
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
nextT = shared_Tn[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_Tn_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
nextT = p_T_minor[izNeighMinor[inext]].Tn;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
if (TESTVNY2) printf("i %d prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpos %1.9E %1.9E\n",
i, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
//if (((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
{ // Decided not to add test
f64 T0 = THIRD*(ourT + prevT + oppT);
f64 T1 = THIRD*(ourT + nextT + oppT);
// CROSSING_INS pressure isn't working: somehow we are not getting edge_normal.y to sum to zero
// when we have n and T the same all the way round.
// However, there's not much sense in having it anyway, since nT can't possibly differ from top to bottom.
// We could inherit the x-direction pressure if we wanted, though...
// Ideally it would be good to ask why this is happening.
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
if (TESTVNY2) printf("CROSSING INS! THERMAL PRESSURE %d MAR_neut %1.10E contrib %1.10E n01 %1.8E %1.8E\n"
"T %1.10E %1.10E edge_normal %1.9E %1.9E \n",
iMinor, MAR_neut.y, 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, edge_normal.x, edge_normal.y);
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
// Just for sanity for now, let's just set our own n,T for the edge:
n0 = p_n_minor[iMinor].n_n;
n1 = p_n_minor[iMinor].n_n;
T0 = ourT;
T1 = ourT;
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
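// i.e. treat r as linear in the interpolation parameter t along the segment, so it
// crosses r = DEVICE_RADIUS_INSULATOR_OUTER at t = (R_ins - r_low) / (r_high - r_low).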
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// set nT on the edge: try just the average of the two nT, weighted by distance to own centre.
// Recall periodic when we look at distance to own centre.
f64 nT_edge = 0.5*(p_n_minor[iMinor].n_n*ourT + p_n_minor[izNeighMinor[i]].n_n*oppT);
MAR_neut -= Make3(nT_edge*over_m_n*edge_normal, 0.0);
if ((TESTVNY2)) printf("crossing-crossing: contrib %1.8E nT_edge %1.8E edge_normal %1.8E ourT %1.8E oppT %1.8E endpt0 %1.8E %1.8E edpt1 %1.8E %1.8E\n", -nT_edge*over_m_n*edge_normal.y,
nT_edge, edge_normal.y, ourT, oppT,endpt0.x, endpt0.y, endpt1.x, endpt1.y);
}
else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
if (TESTVNY2) printf("prevprevpos %1.9E %1.9E \n", prevprevpos.x, prevprevpos.y);
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
}
else {
if (TESTVNY2) printf("%%%%%%%%%%%%%%%%%%%%%%%%%% \nprevflag %d iprev %d izNeighMinor[iprev] %d\n%%%%%%%%%%%%%%%%%%%%%%%% /n",
prevflag, iprev, izNeighMinor[iprev]);
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
if (TESTVNY2) printf("nextnextpos %1.9E %1.9E \n", nextnextpos.x, nextnextpos.y);
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 nT_edge = p_n_minor[iMinor].n_n*ourT;
MAR_neut -= Make3(nT_edge*over_m_n*edge_normal, 0.0);
if ((TESTVNY2)) printf("Looking into ins: contrib %1.8E nT_edge %1.8E edge_normal %1.8E endpot01 %1.8E %1.8E , %1.8E %1.8E\n", -nT_edge*over_m_n*edge_normal.y,
nT_edge, edge_normal.y, endpt0.x, endpt0.y, endpt1.x, endpt1.y);
// will be a 0 contribution if endpt1 = endpt0, that's ok.
}; // CROSSING_INS neigh or not
}; // domain triangle neigh opposite or not
} else {
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
if (TESTVNY2) printf("domain! PRESSURE %d : %d %d MAR_neut %1.10E contrib %1.10E n01 %1.8E %1.8E\n"
"T %1.10E %1.10E edge_normal %1.9E %1.9E \n",
iMinor, i, izNeighMinor[i], MAR_neut.y, 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, edge_normal.x, edge_normal.y);
};
if ((TESTVNY2))
printf("iMinor %d : flag %d : %d %d [flag %d] n01 %1.9E %1.9E T01 %1.9E %1.9E oppT %1.9E contrib %1.10E MAR %1.9E\n",
iMinor, info.flag, i, izNeighMinor[i],
p_info_minor[izNeighMinor[i]].flag,
n0, n1, T0, T1, oppT, -0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
MAR_neut.y);
}
endpt0 = endpt1;
prevT = oppT;
prevpos = opppos;
oppT = nextT;
opppos = nextpos;
iprev = i;
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
} else {
// Not domain, not crossing_ins, not a frill
// ==========================================
// Crossing cath goes here: no pressure.
// AreaMinor unknown ???
} // non-domain tri
}; // was it FRILL
}
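// The six n_array[] values in the triangle section above are built corner-by-corner:
// for each corner vertex of the triangle we form two centroid densities, one on either
// side of this triangle within that vertex's shard model. The following is a minimal
// sketch of that per-corner load as a hypothetical __device__ helper -- the name is
// ours and nothing calls it; it omits the shared-memory fast path and the contiguous
// f64_vec2 / f64_vec3 memcpy tricks above, which are just optimized fetches of the same
// wrapped neighbours who_prev / who_am_I / who_next.
__device__ __forceinline__ void GetCornerNPair_sketch(
	ShardModel const * __restrict__ p_n_shards, // shard models indexed by vertex
	long iCorner,       // which corner vertex of this triangle
	short who_am_I,     // this triangle's slot in that vertex's triangle list
	short tri_len,      // length of that vertex's triangle list
	f64 * n_pair)       // out: n_pair[0] = "next" side, n_pair[1] = "prev" side
{
	short who_prev = (who_am_I == 0) ? (tri_len - 1) : (who_am_I - 1);
	short who_next = (who_am_I == tri_len - 1) ? 0 : (who_am_I + 1);
	f64 ncent = p_n_shards[iCorner].n_cent;
	n_pair[0] = THIRD*(p_n_shards[iCorner].n[who_next] + p_n_shards[iCorner].n[who_am_I] + ncent);
	n_pair[1] = THIRD*(p_n_shards[iCorner].n[who_prev] + p_n_shards[iCorner].n[who_am_I] + ncent);
}
// Hypothetical usage, mirroring the kernel's variables:
//   GetCornerNPair_sketch(p_n_shards, cornerindex.i1, who_am_I_to_corners[0], tri_len, &n_array[0]);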
__global__ void kernelNeutral_momflux(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_v_n_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ NT_addition_tri
)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// A shame we couldn't get down to 16 per minor thread; if we could, that might be better even if we had to load something on the fly.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2));
};
};
__syncthreads();
f64_vec3 our_v, opp_v, prev_v, next_v;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_v_n_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_v_n_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
// DOMAIN_VERTEX only here!
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + opp_v + next_v);
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
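// relvnormal approximates the edge integral of (v_n - v_mesh).nhat: 0.5*(v0 + v1) is the
// mean neutral velocity along the edge, the two THIRD*(...) terms are the mesh (v_overall)
// velocities at the edge endpoints, and edge_normal carries the edge length.
// The upwinding below: if material leaves (relvnormal > 0) we lose our own momentum;
// if it enters, we gain the opposing cell's.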
// CHANGES 20th August 2019
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
int neighflag = p_info_minor[izTri[i]].flag;
if (neighflag == DOMAIN_TRIANGLE) {
if (relvnormal > 0.0) {
// losing stuff
MAR_neut -= 0.5*relvnormal*(n0 + n1)*our_v;
}
else {
MAR_neut -= 0.5*relvnormal*(n0 + n1)*opp_v;
// Why minus? relvnormal was less than zero, but we gain a positive amount of opp_v.
};
};
if (TESTVNY3) {
printf("%d | %d %d | MAR_neuty %1.9E contrib %1.9E %1.9E n0+n1 %1.9E v0.y %1.9E \n"
"our_v_overall %1.9E next_v_overall %1.9E prev_v_overall %1.9E \n"
"our_v %1.9E opp_v %1.9E next_v %1.9E prev_v %1.9E\n",
iVertex, i, izTri[i], MAR_neut.y,
-0.5*relvnormal* (n0 + n1) *(our_v.y),
-0.5*relvnormal* (n0 + n1) *(opp_v.y),
n0+n1, v0.y,
our_v_overall.y, next_v_overall.y, prev_v_overall.y,
our_v.y, opp_v.y, next_v.y, prev_v.y);
}
if (TESTVNXVERT) {
printf("%d | %d %d | MAR_neutx %1.9E contrib %1.9E %1.9E n0+n1 %1.9E v0.y %1.9E \n"
"our_v_overall %1.9E next_v_overall %1.9E prev_v_overall %1.9E \n"
"our_v %1.9E opp_v %1.9E next_v %1.9E prev_v %1.9E\n",
iVertex, i, izTri[i], MAR_neut.x,
-0.5*relvnormal* (n0 + n1) *(our_v.x),
-0.5*relvnormal* (n0 + n1) *(opp_v.x),
n0 + n1, v0.x,
our_v_overall.x, next_v_overall.x, prev_v_overall.x,
our_v.x, opp_v.x, next_v.x, prev_v.x);
}
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the triangle-minor part:
info = p_info_minor[iMinor];
our_v = shared_v_n[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
}
else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + next_v + opp_v);
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
if (izNeighMinor[i] < BEGINNING_OF_CENTRAL) {
// Note that average instead of upwind, is of course unstable.
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
}
else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
}
} else {
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 v_overall0, v_overall1;
v_overall0 = THIRD * (our_v_overall + prev_v_overall + opp_v_overall);
v_overall1 = THIRD * (our_v_overall + next_v_overall + opp_v_overall);
// Note that this follows from the arithmetic definition of the thing.
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
// endpt1 is defined in this way, so its motion must be defined accordingly.
// The v_overall of the below-insulator point is actually 0.
f64 r3 = nextpos.modulus();
v_overall1 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r1 - r3))*v_overall0;
// but has no radial component:
v_overall1 -= (v_overall1.dot(endpt1)) / (endpt1.dot(endpt1))*endpt1;
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
f64 r3 = prevpos.modulus();
v_overall0 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r2 - r3))*v_overall1;
// but has no radial component:
v_overall0 -= (v_overall0.dot(endpt0)) / (endpt0.dot(endpt0))*endpt0;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// have not yet handled how to do momflux between two CROSSING_INS tris.
// the above vxy1 etc will be invalid because of taking data from insulator points.
// Does that mean we will get weird effects? Probably. Have to think here then.
// Reset relvnormal:
if (prev_v.z == 0.0) v0 = 0.5*(our_v + opp_v);
if (next_v.z == 0.0) v1 = 0.5*(our_v + opp_v);
if (n0 == 0.0) // generated from shardmodel from inside the insulator, then it should come out 0.
n0 = 0.5*(p_n_minor[iMinor].n_n + p_n_minor[izNeighMinor[i]].n_n);
if (n1 == 0.0)
n1 = 0.5*(p_n_minor[iMinor].n_n + p_n_minor[izNeighMinor[i]].n_n);
relvnormal = 0.5*((v0 + v1).xypart()
- v_overall0 - v_overall1).dot(edge_normal);
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
} else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
} else {
// Looking down into insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
}
else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
};
// will be a 0 contribution if endpt1 = endpt0, that's ok.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// should be facing towards (0,0).
// Insulator arc isn't moving, no v_overall.
relvnormal = our_v.dotxy(edge_normal);
if (relvnormal > 0.0) {
f64 n_edge = p_n_minor[iMinor].n_n;
// Only the vr component is reversed!!!
// f64 vr = -our_v.vxy.dot(edge_normal) / edge_normal.modulus();
// rhat = -edge_normal/edge_normal.modulus();
// v-= vr rhat
f64_vec2 vr_rhat = edge_normal*((our_v.dotxy(edge_normal)) /
(edge_normal.dot(edge_normal)));
// positive amt * negative r vector = negative amt * positive r vector.
f64 vr_squared = our_v.dotxy(edge_normal)*our_v.dotxy(edge_normal) /
edge_normal.dot(edge_normal);
MAR_neut -= 2.0*relvnormal*n_edge*Make3(vr_rhat, 0.0);
// Now add heat:
// change in 0.5 Nvv = 0.5v d/dt(Nv) = vr*vr*n_edge*relvnormal since v dot vr rhat = vr^2
// change in 1.5 NT should cancel this.
NT_addition_tri[iMinor].NnTn += 0.6666666666667*m_n*vr_squared*n_edge*relvnormal;
};
// If we are pulling away from the ins, do nothing!
};
};
} else {
// Typical edge.
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
} else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
if (((TESTVNY2)))
printf("advectiveGPU %d i %d MAR_neut.y %1.12E contrib >0 %1.12E <0 %1.12E relvnormal %1.12E\n"
"n0 %1.12E n1 %1.12E v01.y %1.12E %1.12E vxyours.y %1.12E opp %1.12E\n"
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
CHOSEN, i,
MAR_neut.y,
-0.5*relvnormal* (n0 + n1) *(our_v.y), -0.5*relvnormal* (n0 + n1) *opp_v.y,
relvnormal,
n0, n1, v0.y, v1.y, our_v.y, opp_v.y);
};
// Notice that we also conserved momentum while we were doing ionization changes, or that was the intention.
iprev = i;
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
} else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}
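// Both kernels above repeatedly clip a minor-edge endpoint to the insulator by treating
// radius as (approximately) linear along the straight segment between two points, one
// below and one above DEVICE_RADIUS_INSULATOR_OUTER. A minimal sketch of that
// interpolation as a hypothetical helper (the name is ours; the kernels inline it):
__device__ __forceinline__ f64_vec2 ClipToInsulator_sketch(f64_vec2 pos_lo, f64_vec2 pos_hi)
{
	// r(t) ~= r_lo + t*(r_hi - r_lo) along the segment pos_lo -> pos_hi, so the
	// interpolated radius hits DEVICE_RADIUS_INSULATOR_OUTER at
	// t = (R_ins - r_lo) / (r_hi - r_lo).
	f64 r_lo = pos_lo.modulus();
	f64 r_hi = pos_hi.modulus();
	return pos_lo + ((DEVICE_RADIUS_INSULATOR_OUTER - r_lo) / (r_hi - r_lo))*(pos_hi - pos_lo);
}
// Hypothetical usage, as in the r1 > r2 branch above: endpt1 = ClipToInsulator_sketch(endpt1, endpt0);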
__global__ void kernelAntiAdvect(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
AAdot * __restrict__ p_AAdot,
f64_vec2 * __restrict__ p_v_overall_minor,
AAdot * __restrict__ p_AAdot_dest
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ AAdot shared_AAdot[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ AAdot shared_AAdot_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
structural info = p_info_minor[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_AAdot[threadIdx.x] = p_AAdot[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_AAdot_verts[threadIdx.x] = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
AAdot oppA, prevA, nextA, ourA;
f64_vec2 opppos, prevpos, nextpos, Integ_grad_Az, Integ_grad_Azdot;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor)
{
if (info.flag == DOMAIN_VERTEX) {// otherwise no move
AreaMinor = 0.0;
Integ_grad_Az.x = 0.0;
Integ_grad_Az.y = 0.0;
Integ_grad_Azdot.x = 0.0;
Integ_grad_Azdot.y = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourA = shared_AAdot_verts[threadIdx.x];
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevA = shared_AAdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevA = p_AAdot[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppA = shared_AAdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppA = p_AAdot[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
// we said DOMAIN_VERTEX
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextA = shared_AAdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextA = p_AAdot[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 Az_edge = SIXTH * (2.0*ourA.Az + 2.0*oppA.Az + prevA.Az + nextA.Az);
Integ_grad_Az += Az_edge*edge_normal;
f64 Azdot_edge = SIXTH * (2.0*ourA.Azdot + 2.0*oppA.Azdot + prevA.Azdot + nextA.Azdot);
Integ_grad_Azdot += Azdot_edge*edge_normal;
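// Az_edge is the mean of Az at the two edge endpoints: each endpoint value is the centroid
// average (ourA + oppA + neighbourA)/3, so the mean is (2*ourA + 2*oppA + prevA + nextA)/6.
// Summing Az_edge*edge_normal around the cell gives the Green-Gauss estimate of
// AreaMinor*grad(Az); we divide by AreaMinor after the loop.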
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevA = oppA;
opppos = nextpos;
oppA = nextA;
}; // next i
f64_vec2 Grad_Az = Integ_grad_Az / AreaMinor;
f64_vec2 Grad_Azdot = Integ_grad_Azdot / AreaMinor;
AAdot AAdot_dest;
f64_vec2 v_overall = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
AAdot_dest.Az = ourA.Az + h_use*Grad_Az.dot(v_overall);
AAdot_dest.Azdot = ourA.Azdot + h_use*Grad_Azdot.dot(v_overall);
// Why was this minus?
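// One reading (not settled here): if the node moves by h_use*v_overall over the step, then
// re-sampling a frozen field at the new node position gives Az(x + h*v) ~= Az(x) + h*v.grad(Az),
// which is the "+" correction applied above.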
p_AAdot_dest[iVertex + BEGINNING_OF_CENTRAL] = AAdot_dest;
} else {
p_AAdot_dest[iVertex + BEGINNING_OF_CENTRAL] = shared_AAdot_verts[threadIdx.x];
};
};
// now the triangle-minor part:
info = p_info_minor[iMinor];
ourA = shared_AAdot[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
Integ_grad_Az.x = 0.0;
Integ_grad_Az.y = 0.0;
Integ_grad_Azdot.x = 0.0;
Integ_grad_Azdot.y = 0.0;
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevA = shared_AAdot[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevA = shared_AAdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
prevA = p_AAdot[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
oppA = shared_AAdot[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppA = shared_AAdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
oppA = p_AAdot[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
nextA = shared_AAdot[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextA = shared_AAdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
nextA = p_AAdot[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 Az_edge = SIXTH * (2.0*ourA.Az + 2.0*oppA.Az + prevA.Az + nextA.Az);
Integ_grad_Az += Az_edge*edge_normal;
f64 Azdot_edge = SIXTH * (2.0*ourA.Azdot + 2.0*oppA.Azdot + prevA.Azdot + nextA.Azdot);
Integ_grad_Azdot += Azdot_edge*edge_normal;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevA = oppA;
prevpos = opppos;
oppA = nextA;
opppos = nextpos;
};
f64_vec2 Grad_Az = Integ_grad_Az / AreaMinor;
f64_vec2 Grad_Azdot = Integ_grad_Azdot / AreaMinor;
AAdot AAdot_dest;
f64_vec2 v_overall = p_v_overall_minor[iMinor];
AAdot_dest.Az = ourA.Az + h_use*Grad_Az.dot(v_overall);
AAdot_dest.Azdot = ourA.Azdot + h_use*Grad_Azdot.dot(v_overall);
// Why was this minus?
p_AAdot_dest[iMinor] = AAdot_dest;
} else {
p_AAdot_dest[iMinor] = shared_AAdot[threadIdx.x]; // no move
};
}
__global__ void kernelGet_AreaMinorFluid(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
bool * __restrict__ bz_pressureflag,
f64 * __restrict__ p_AreaMinor
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
if (threadIdx.x < threadsPerTileMajor) {
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
{
			// Let's say we do this for every vertex.
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
// Think carefully: DOMAIN vertex cases for n,T ...
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc.
radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
endpt1 = store_first_point;
nextpos = p_info_minor[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
};
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
	// Now the per-triangle (minor) part:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
p_AreaMinor[iMinor] = 1.0e-12;
}
else {
f64 AreaMinor = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
// if (iMinor == CHOSEN) printf("%d : endpt %1.8E %1.8E | %1.8E %1.8E ;\n ",
// iMinor, endpt0.x, endpt0.y, endpt1.x, endpt1.y);
// To get integral grad we add the averages along the edges times edge_normals
// if (iMinor == CHOSEN) printf("%d : %d opppos %1.8E %1.8E \n",
// iMinor, izNeighMinor[i], opppos.x, opppos.y);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// endpt0 = THIRD * (prevpos + info.pos + opppos);
// endpt1 = THIRD * (nextpos + info.pos + opppos);
// edge_normal.x = endpt1.y - endpt0.y;
// edge_normal.y = endpt0.x - endpt1.x;
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
} else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
}; // domain triangle opposite or not
} else {
};
// if (iMinor == CHOSEN) printf(" endpt %1.8E %1.8E | %1.8E %1.8E ; \n",
// endpt0.x, endpt0.y, endpt1.x, endpt1.y);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*(endpt1.y - endpt0.y);
// See a way that FP accuracy was eroded: we take a difference of two close things already to get edge_normal.
// can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
iprev = i;
prevpos = opppos;
opppos = nextpos;
};
// No setting a_r = 0
p_AreaMinor[iMinor] = AreaMinor;
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
				f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
p_AreaMinor[iMinor] = AreaMinor;
} // non-domain tri
}; // was it FRILL
}
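
// Minimal host-side launch sketch for kernelGet_AreaMinorFluid (added as an illustration,
// not taken from the original source). numTilesMinor and the p_* pointer names here are
// assumptions; the only firm requirement visible in the kernel is that it is launched with
// blockDim.x == threadsPerTileMinor so the shared tiles and StartMinor/StartMajor line up.
void Sketch_LaunchGetAreaMinorFluid(
	structural *p_info_minor, long *p_izTri, char *p_szPBC,
	long *p_izNeighMinor, char *p_szPBCtriminor,
	bool *p_pressureflag, f64 *p_AreaMinor, int numTilesMinor)
{
	kernelGet_AreaMinorFluid <<< numTilesMinor, threadsPerTileMinor >>> (
		p_info_minor, p_izTri, p_szPBC, p_izNeighMinor, p_szPBCtriminor,
		p_pressureflag, p_AreaMinor);
	cudaDeviceSynchronize(); // simple synchronous check; real calling code may differ
}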
__global__ void kernelAccumulateSummandsVisc(
f64_vec2 * __restrict__ p_eps_xy, //
f64 * __restrict__ p_eps_iz,
f64 * __restrict__ p_eps_ez,
f64_vec2 * __restrict__ p_d_epsxy_by_d_beta, // f64_vec2
f64 * __restrict__ p_d_eps_iz_by_d_beta,
f64 * __restrict__ p_d_eps_ez_by_d_beta,
// outputs:
f64 * __restrict__ p_sum_eps_deps_, // 8 values for this block
f64 * __restrict__ p_sum_product_matrix_
)
{
__shared__ f64 sumdata_eps_deps[threadsPerTileMinor / 4][REGRESSORS];
__shared__ f64 sum_product[threadsPerTileMinor / 4][REGRESSORS][REGRESSORS];
	// Call with blockDim.x == threadsPerTileMinor/4: each thread handles 4 elements of the
	// tile, strided by threadsPerTileMinor/4, so iMinor is offset by the FULL tile width per
	// block (blockIdx.x * threadsPerTileMinor), not by blockDim.x.
	long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
f64_vec2 depsbydbeta2[REGRESSORS];
f64 depsbydbeta[REGRESSORS], depsbydbeta_e[REGRESSORS];
f64_vec2 eps_xy;
f64 eps_iz, eps_ez;
int i, j;
memset(&(sumdata_eps_deps[threadIdx.x]), 0, sizeof(f64)*REGRESSORS);
memset(&(sum_product[threadIdx.x]), 0, sizeof(f64)*REGRESSORS*REGRESSORS);
eps_xy = p_eps_xy[iMinor];
eps_iz = p_eps_iz[iMinor];
eps_ez = p_eps_ez[iMinor];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] = depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i]*eps_iz + depsbydbeta_e[i]*eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] = depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + threadsPerTileMinor / 4];
eps_iz = p_eps_iz[iMinor + threadsPerTileMinor / 4];
eps_ez = p_eps_ez[iMinor + threadsPerTileMinor / 4];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + threadsPerTileMinor / 2];
eps_iz = p_eps_iz[iMinor + threadsPerTileMinor / 2];
eps_ez = p_eps_ez[iMinor + threadsPerTileMinor / 2];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + 3 * threadsPerTileMinor / 4];
eps_iz = p_eps_iz[iMinor + 3 * threadsPerTileMinor / 4];
eps_ez = p_eps_ez[iMinor + 3 * threadsPerTileMinor / 4];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += sumdata_eps_deps[threadIdx.x + k][i];
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += sum_product[threadIdx.x + k][i][j];
};
};
__syncthreads();
		// Modify for the case where blockDim is not a power of 2:
		// when s is odd the pairwise pass above only reaches element s-2, so fold the
		// leftover element s-1 into element k-1 (e.g. s == 81: add [39] += [80];
		// otherwise we only get to 39+40=79).
		if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
			for (i = 0; i < REGRESSORS; i++)
			{
				sumdata_eps_deps[threadIdx.x][i] += sumdata_eps_deps[s - 1][i];
				for (j = 0; j < REGRESSORS; j++)
					sum_product[threadIdx.x][i][j] += sum_product[s - 1][i][j];
			};
		};
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
memcpy(&(p_sum_eps_deps_[blockIdx.x*REGRESSORS]), &(sumdata_eps_deps[0][0]), sizeof(f64)*REGRESSORS);
memcpy(&(p_sum_product_matrix_[blockIdx.x*REGRESSORS*REGRESSORS]), &(sum_product[0][0][0]), sizeof(f64)*REGRESSORS*REGRESSORS);
};
}
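
// The kernel above leaves one REGRESSORS-vector and one REGRESSORS x REGRESSORS matrix of
// partial sums per block. A minimal host-side sketch (an illustration under assumed names,
// not taken from the original source) of folding those per-block results together before
// solving the normal equations (sum_product) * beta = (sum_eps_deps) for the step coefficients:
void Sketch_CombineSummandsVisc(
	f64 const *p_sum_eps_deps_host,   // numBlocks * REGRESSORS, copied back from the device
	f64 const *p_sum_product_host,    // numBlocks * REGRESSORS * REGRESSORS
	int numBlocks,
	f64 sum_eps_deps[REGRESSORS],
	f64 sum_product[REGRESSORS][REGRESSORS])
{
	memset(sum_eps_deps, 0, sizeof(f64)*REGRESSORS);
	memset(sum_product, 0, sizeof(f64)*REGRESSORS*REGRESSORS);
	for (int iBlock = 0; iBlock < numBlocks; iBlock++) {
		for (int i = 0; i < REGRESSORS; i++) {
			sum_eps_deps[i] += p_sum_eps_deps_host[iBlock*REGRESSORS + i];
			for (int j = 0; j < REGRESSORS; j++)
				sum_product[i][j] += p_sum_product_host[iBlock*REGRESSORS*REGRESSORS + i*REGRESSORS + j];
		};
	};
	// The caller would then solve (sum_product) beta = (sum_eps_deps), e.g. by Gaussian
	// elimination, to obtain the REGRESSORS regression coefficients; that solve is not shown.
}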
__global__ void SplitVector4(
f64_vec2 * __restrict__ p_xy,
f64 * __restrict__ p_z1,
f64 * __restrict__ p_z2,
v4 * __restrict__ p_v4,
int * __restrict__ p_Select
)
{
long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
v4 temp;
if (p_Select[iMinor] == 0) {
memset(&temp, 0, sizeof(v4));
} else {
temp = p_v4[iMinor];
}
p_xy[iMinor] = temp.vxy;
p_z1[iMinor] = temp.viz;
p_z2[iMinor] = temp.vez;
}
__global__ void kernelAccumulateSummandsProduct(
f64_vec2 * __restrict__ p_eps_xy, //
f64 * __restrict__ p_eps_iz,
f64 * __restrict__ p_eps_ez,
f64_vec2 * __restrict__ p_d_epsxy_by_d_beta, // f64_vec2
f64 * __restrict__ p_d_eps_iz_by_d_beta,
f64 * __restrict__ p_d_eps_ez_by_d_beta,
// outputs:
f64 * __restrict__ p_sum_eps_deps_
)
{
__shared__ f64 sumdata_eps_deps[threadsPerTileMinor];
long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
f64_vec2 depsbydbeta2;
f64 depsbydbeta, depsbydbeta_e;
f64_vec2 eps_xy;
f64 eps_iz, eps_ez;
	// (each thread writes its summand directly below; no loop indices or zero-init needed)
eps_xy = p_eps_xy[iMinor];
eps_iz = p_eps_iz[iMinor];
eps_ez = p_eps_ez[iMinor];
depsbydbeta2 = p_d_epsxy_by_d_beta[iMinor];
depsbydbeta = p_d_eps_iz_by_d_beta[iMinor];
depsbydbeta_e = p_d_eps_ez_by_d_beta[iMinor];
sumdata_eps_deps[threadIdx.x] = depsbydbeta2.dot(eps_xy)
+ depsbydbeta * eps_iz + depsbydbeta_e * eps_ez;
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sumdata_eps_deps[threadIdx.x] += sumdata_eps_deps[threadIdx.x + k];
};
__syncthreads();
		// Modify for the case where blockDim is not a power of 2:
		// when s is odd, fold the leftover element s-1 into element k-1
		// (e.g. s == 81: add [39] += [80]; the pairwise pass only reached 39+40=79).
		if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
			sumdata_eps_deps[threadIdx.x] += sumdata_eps_deps[s - 1];
		};
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum_eps_deps_[blockIdx.x] = sumdata_eps_deps[0];
};
}
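
// Host-side counterpart sketch (an illustration, not from the original source): the full dot
// product of epsilon with d(eps)/d(beta) is simply the sum of the per-block values written above.
f64 Sketch_SumEpsDepsOverBlocks(f64 const *p_sum_eps_deps_host, int numBlocks)
{
	f64 total = 0.0;
	for (int iBlock = 0; iBlock < numBlocks; iBlock++)
		total += p_sum_eps_deps_host[iBlock];
	return total;
}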
// ===================== cuda source file: 80675b2dbe76a47efc52e9d0de129e29e75e0f82.cu =====================
#include "kernel.h"
#include "helpers.cu"
#include "vector_tensor.cu"
#include "cuda_struct.h"
#include "constant.h"
#include "FFxtubes.h"
#define BWDSIDET
#define LONGITUDINAL
#define SQRTNT 1
// TO DO:
// Line 1420:
// Yes, very much a waste. The edge positions should be calculated from the vertex positions, we can
// load flags to determine if it is an insulator-crossing triangle and that is the proper way to handle that.
#define FOUR_PI 12.5663706143592
#define TEST (0) //iVertex == VERTCHOSEN)
#define TEST_ELEC_VISC_TRI (0) //iMinor == CHOSEN)
#define TESTNEUTVISC2 (0) // iMinor == CHOSEN)
#define TESTPRESSUREY (0) //iVertex == VERTCHOSEN)
#define TEST_T (0)
#define TEST3 (0)
#define TEST1 (0)
#define TESTTRI (0) // thermal pressure output & infer minor density & momflux_minor
#define TESTADVECT (0)
#define TESTADVECTZ (0)//iVertex == VERTCHOSEN)
#define TESTADVECTNEUT (0) //iVertex == VERTCHOSEN)
#define TESTIONVERTVISC (0)//(iVertex == VERTCHOSEN)
#define TESTNEUTVISC (0) // iVertex == VERTCHOSEN)
#define TESTVISC (0) //iMinor == CHOSEN)
#define TESTIONVISC (0)
#define TESTHEAT (0)
#define TESTHEATFULL (0)
#define TESTHEAT1 (0)
#define TESTTRI2 (0)
#define TESTTRI3 (0)
#define TESTHEAT2 (0)
#define TESTIONISE (0)
#define TESTOHMS (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_IONIZE (0) //iVertex == VERTCHOSEN)
#define TESTACCEL (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TESTACCEL2 (0) //iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN)
#define TESTACCEL_X (0) // PopOhms output
#define TESTLAP (0)
#define TESTLAP2 (0) //(iMinor == CHOSEN1) || (iMinor == CHOSEN2))
#define TESTVEZ (0)//iMinor == CHOSEN)
#define TEST_VS_MATRIX (0) //iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_VS_MATRIX2 (0) // iVertex == VERTCHOSEN
#define TESTVNX (0)
#define TESTVNY (0) //iMinor == CHOSEN)//PopOhms
#define TESTVNY2 (0) // iMinor == CHOSEN) //neutral momflux
#define TESTVNY3 (0)// || (iVertex == VERTCHOSEN2))
#define TESTVNZ (0)//iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define TEST_ADV_HEAT_FLAG 0
#define TEST_ADV_MASS_FLAG 0
#define TESTVNXVERT (0)
#define TESTVNYVERT (0)
#define TEST_ACCEL_Y (0) // iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL)
#define VISCMAG 1
#define MIDPT_A
#define TEST_ACCEL_EZ (0)//iMinor == CHOSEN)
#define TEST_EPSILON_Y (0)
#define TEST_EPSILON_X (0)
#define TEST_EPSILON_Y_IMINOR (0)//iMinor == lChosen)
#define TEST_EPSILON_X_MINOR (0) // iMinor == CHOSEN)
#define ARTIFICIAL_RELATIVE_THRESH 1.0e10 // if we let it be more strict than heat thresh then it drives a difference generating heat!
#define ARTIFICIAL_RELATIVE_THRESH_HEAT 1.0e10 // typical initial density is 1e8 vs 1e18
#define LOW_THRESH_FOR_VISC_CALCS 1.0e10 // density. This should not be too much greater than the density where we do not soak away velocity and heat. At the moment it's 100 times.
#define MINIMUM_NU_EI_DENSITY 1.0e12
// Try excluding only if both sides are at this density -- it just doesn't matter.
// Just exclude to/from anything this sparse. It doesn't matter.
// Heat is the one that can be a problem as soaking away heat means that we never can grow our ionization level - we're basically stuck at 0
// unless a giant wave of ions sweeps in. We start at low ionization.. 1e8/1e18 = 1e-10.
// Change log. 090419: Change upwind density routine to just use n from the lowest cell that is upwind for at least 1 side.
// 230419: Change nu_n used in kappa_neut to be a lc of collision frequencies.
// 250419: Change to use min(ita_ours, ita_theirs). Maybe need to do same for kappa_par.
// Change to apportion visc heat from tri per N.
//const int Chosens[7] = { 25454, 86529, 25453, 86381, 25455, 86530, 25750 };
__device__ f64 ArtificialUpliftFactor(f64 n_i, f64 n_n)
{
// Used in ionization uplift and in heat inter-species transfer.
// At n_i = 1e9, nn 1e14 we want nn equiv 1e20 so 1e6 uplift
// We do not care much about small amt of neutrals as much as small amt of ions.
if (n_i + n_n > 1.0e15) return 1.0;
f64 t = (n_n*1.0e15 + n_i*(n_i + n_n)) / ((n_i + n_n)*(n_i + n_n));
return min(t*t,1.0e6);
// Having to boost up when < 1e15 because our dodgy point has > 1e14.
}
__device__ f64 ArtificialUpliftFactor_MT(f64 n_i, f64 n_n)
{
if (n_i > 1.0e13) return 1.0;
// Used in crushing v to be hydrodynamic and in viscous ita.
f64 additional_nn = min(exp(-n_i*n_i/0.5e24)*(1.0e30 / (n_i)), 1.0e20); // high effective density to produce hydrodynamics
// n <= 1e10 : additional_nn ~= 1e20
// n == 1e11 : additional_nn ~= 1e19
// n == 1e12 : additional_nn ~= 1e17
// n == 1e13 : additional_nn ~= 1e-70
return 1.0 + additional_nn /n_n;
}
__device__ __forceinline__ void CalculateCircumcenter(f64_vec2 * p_cc, f64_vec2 poscorner0, f64_vec2 poscorner1, f64_vec2 poscorner2)
{
f64_vec2 Bb = poscorner1 - poscorner0;
f64_vec2 C = poscorner2 - poscorner0;
f64 D = 2.0*(Bb.x*C.y - Bb.y*C.x);
f64 modB = Bb.x*Bb.x + Bb.y*Bb.y;
f64 modC = C.x*C.x + C.y*C.y;
p_cc->x = (C.y*modB - Bb.y*modC) / D + poscorner0.x;
p_cc->y = (Bb.x*modC - C.x*modB) / D + poscorner0.y;
// formula agrees with wikipedia so why does it give a stupid result.
}
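
// Quick sanity check for the circumcenter formula above (added illustration, not part of the
// original source). For the right triangle (0,0), (1,0), (0,1) the formula gives the hypotenuse
// midpoint (0.5, 0.5), which is equidistant from all three corners.
__device__ bool Sketch_TestCircumcenter()
{
	f64_vec2 cc;
	f64_vec2 a(0.0, 0.0), b(1.0, 0.0), c(0.0, 1.0);
	CalculateCircumcenter(&cc, a, b, c);
	f64 ra = (cc - a).modulus();
	f64 rb = (cc - b).modulus();
	f64 rc = (cc - c).modulus();
	return (fabs(ra - rb) < 1.0e-12) && (fabs(ra - rc) < 1.0e-12);
}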
__device__ __forceinline__ bool TestDomainPos(f64_vec2 pos)
{
return (
(pos.x*pos.x + pos.y*pos.y > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
&&
(pos.x*pos.x + (pos.y - CATHODE_ROD_R_POSITION)*(pos.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
);
}
__device__ f64 GetRecombinationRate_given_v(f64 const Te, int i_v)
{
f64 const TeeV = Te / kB;
f64 const Tesq = TeeV*TeeV;
f64 const Te3 = Tesq*TeeV;
f64 rate, rate1, rate2;
if (Te > 4.75e-11) return 0.0;
if (Te < 1.875e-12) { // Magic numbers!!
rate = (recomb_coeffs[i_v][0][4] + recomb_coeffs[i_v][0][3] * TeeV
+ recomb_coeffs[i_v][0][2] * Tesq + recomb_coeffs[i_v][0][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][0][0] * TeeV);
} else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][0][0];
};
} else {
if (Te < 2.25e-12) {
rate1 = (recomb_coeffs[i_v][0][4] + recomb_coeffs[i_v][0][3] * TeeV
+ recomb_coeffs[i_v][0][2] * Tesq + recomb_coeffs[i_v][0][1] * Te3);
rate2 = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
if (i_v < 7) {
rate1 /= (1.0 - recomb_coeffs[i_v][0][0] * TeeV);
rate2 /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
}
else {
rate1 += exp(-TeeV*0.5)*recomb_coeffs[i_v][0][0];
rate2 += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
};
f64 ppn_high = (Te - 1.875e-12) / (2.25e-12 - 1.875e-12);
f64 ppn_low = (2.25e-12 - Te) / (2.25e-12-1.875e-12);
rate = rate1*ppn_low + rate2*ppn_high;
} else {
if (Te < 1.05e-11) {
rate = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
}
else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
};
} else {
if (Te < 1.0875e-11) {
rate1 = (recomb_coeffs[i_v][1][4] + recomb_coeffs[i_v][1][3] * TeeV
+ recomb_coeffs[i_v][1][2] * Tesq + recomb_coeffs[i_v][1][1] * Te3);
rate2 = (recomb_coeffs[i_v][2][4] + recomb_coeffs[i_v][2][3] * TeeV
+ recomb_coeffs[i_v][2][2] * Tesq + recomb_coeffs[i_v][2][1] * Te3);
if (i_v < 7) {
rate1 /= (1.0 - recomb_coeffs[i_v][1][0] * TeeV);
rate2 /= (1.0 - recomb_coeffs[i_v][2][0] * TeeV);
} else {
rate1 += exp(-TeeV*0.5)*recomb_coeffs[i_v][1][0];
rate2 += exp(-TeeV*0.5)*recomb_coeffs[i_v][2][0];
};
f64 ppn_high = (Te - 1.05e-11) / (1.0875e-11 - 1.05e-11);
f64 ppn_low = (1.0875e-11 - Te) / (1.0875e-11 - 1.05e-11);
rate = rate1*ppn_low + rate2*ppn_high;
} else {
rate = (recomb_coeffs[i_v][2][4] + recomb_coeffs[i_v][2][3] * TeeV
+ recomb_coeffs[i_v][2][2] * Tesq + recomb_coeffs[i_v][2][1] * Te3);
if (i_v < 7) {
rate /= (1.0 - recomb_coeffs[i_v][2][0] * TeeV);
} else {
rate += exp(-TeeV*0.5)*recomb_coeffs[i_v][2][0];
};
};
};
};
};
return rate;
}
__device__ f64 GetIonizationRate_given_v(f64 const Te, int i_v)
{
f64 TeeV = Te / kB;
if (Te > ionize_temps[i_v][9]) {
TeeV = ionize_temps[i_v][9] / kB;
}
f64 Tesq = TeeV*TeeV;
f64 Te3 = Tesq*TeeV;
f64 Te4 = Tesq*Tesq;
f64 rate, rate1, rate2;
bool b_exp[5];
memset(b_exp, 0, sizeof(bool) * 5);
if (i_v < 18) {
b_exp[0] = true; b_exp[1] = true; b_exp[2] = true;
};
if (i_v == 18) {
b_exp[0] = true; b_exp[1] = true;
}
//printf("i_v %d b_exp %d %d %d %d %d \n", i_v, (b_exp[0]) ? 1 : 0, (b_exp[1]) ? 1 : 0, (b_exp[2]) ? 1 : 0, (b_exp[3]) ? 1 : 0, (b_exp[4]) ? 1 : 0);
if (Te < ionize_temps[i_v][0]) {
if (i_v < 19) {
rate = 0.0;
} // let's say 18 is where we treat as over critical velocity.
else {
TeeV = ionize_temps[i_v][0] / kB; // return low end value
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
};
} else {
if (Te < ionize_temps[i_v][1]) {
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
if (b_exp[0]) rate = exp(rate);
}
else {
if (Te < ionize_temps[i_v][2]) {
rate1 = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
rate2 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[0]) rate1 = exp(rate1);
if (b_exp[1]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][1]) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
f64 ppn_low = (ionize_temps[i_v][2] - Te) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][3])
{
rate = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[1]) rate = exp(rate);
} else {
if (Te < ionize_temps[i_v][4]) {
rate1 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
rate2 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[1]) rate1 = exp(rate1);
if (b_exp[2]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][3]) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
f64 ppn_low = (ionize_temps[i_v][4] - Te) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][5]) {
rate = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[2]) rate = exp(rate);
}
else {
if (Te < ionize_temps[i_v][6]) {
rate1 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
rate2 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[2]) rate1 = exp(rate1);
if (b_exp[3]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][5]) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
f64 ppn_low = (ionize_temps[i_v][6] - Te) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
if (Te < ionize_temps[i_v][7]) {
rate = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate = exp(rate); // it is always false anyway
}
else {
if (Te < ionize_temps[i_v][8]) {
rate1 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
rate2 = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[3]) rate1 = exp(rate1); // it is always false anyway
if (b_exp[4]) rate2 = exp(rate2); // it is always false anyway
f64 ppn_high = (Te - ionize_temps[i_v][7]) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
f64 ppn_low = (ionize_temps[i_v][8] - Te) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
rate = rate1*ppn_low + rate2*ppn_high;
}
else {
rate = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate = exp(rate); // it is always false anyway
};
}
};
};
};
};
};
};
};
return rate;
}
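
// (Added summary, derived from the code above.) The temperature axis is split by
// ionize_temps[i_v][0..9] into "pure" bands, where a single quartic
//     rate_k(TeeV) = c4 + c3*TeeV + c2*TeeV^2 + c1*TeeV^3 + c0*TeeV^4,  c = ionize_coeffs[i_v][k][4..0]
// is evaluated (and exponentiated when b_exp[k] is set), and "blend" bands, where the two
// neighbouring fits are mixed linearly:
//     rate = rate_k * (T_hi - Te)/(T_hi - T_lo) + rate_{k+1} * (Te - T_lo)/(T_hi - T_lo).
// Below ionize_temps[i_v][0] the rate is 0 (or the low-end fit value for i_v >= 19), and above
// ionize_temps[i_v][9] TeeV is clamped to that top temperature.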
__device__ f64 GetIonizationRate_given_v_Debug(f64 const Te, int i_v)
{
f64 TeeV = Te / kB;
if (Te > ionize_temps[i_v][9]) {
TeeV = ionize_temps[i_v][9] / kB;
}
f64 Tesq = TeeV*TeeV;
f64 Te3 = Tesq*TeeV;
f64 Te4 = Tesq*Tesq;
f64 rate, rate1, rate2;
bool b_exp[5];
memset(b_exp, 0, sizeof(bool) * 5);
if (i_v < 18) {
b_exp[0] = true; b_exp[1] = true; b_exp[2] = true;
};
if (i_v == 18) {
b_exp[0] = true; b_exp[1] = true;
}
printf("i_v %d b_exp %d %d %d %d %d \n", i_v, (b_exp[0]) ? 1 : 0, (b_exp[1]) ? 1 : 0, (b_exp[2]) ? 1 : 0, (b_exp[3]) ? 1 : 0, (b_exp[4]) ? 1 : 0);
if (Te < ionize_temps[i_v][0]) {
if (i_v < 19) {
rate = 0.0;
printf("Te %1.12E was below %1.12E and we returned 0 ionization rate.\n", Te, ionize_temps[i_v][0]);
} // let's say 18 is where we treat as over critical velocity.
else {
TeeV = ionize_temps[i_v][0] / kB; // return low end value
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
printf("used low end rate: %1.9E \n", rate);
};
} else {
if (Te < ionize_temps[i_v][1]) {
rate = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
if (b_exp[0]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[0] %d 0-1 rate %1.8E coeffs[0] \n", i_v, Te,
(b_exp[0] ? 1:0), rate, ionize_coeffs[i_v][0][0]);
} else {
if (Te < ionize_temps[i_v][2]) {
rate1 = (ionize_coeffs[i_v][0][4] + ionize_coeffs[i_v][0][3] * TeeV
+ ionize_coeffs[i_v][0][2] * Tesq + ionize_coeffs[i_v][0][1] * Te3
+ ionize_coeffs[i_v][0][0] * Te4);
rate2 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[0]) rate1 = exp(rate1);
if (b_exp[1]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][1]) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
f64 ppn_low = (ionize_temps[i_v][2] - Te) / (ionize_temps[i_v][2] - ionize_temps[i_v][1]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[0] %d b_exp[1] %d 1-2 rate %1.8E coeffs[1][0] \n", i_v, Te,
(b_exp[0] ? 1 : 0), (b_exp[1] ? 1 : 0), rate, ionize_coeffs[i_v][1][0]);
} else {
if (Te < ionize_temps[i_v][3])
{
rate = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
if (b_exp[1]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[0] %d b_exp[1] %d Temps2-3 = 1 rate %1.8E coeffs[1][0] \n", i_v, Te,
(b_exp[0] ? 1 : 0), (b_exp[1] ? 1 : 0), rate, ionize_coeffs[i_v][1][0]);
} else {
if (Te < ionize_temps[i_v][4]) {
rate1 = (ionize_coeffs[i_v][1][4] + ionize_coeffs[i_v][1][3] * TeeV
+ ionize_coeffs[i_v][1][2] * Tesq + ionize_coeffs[i_v][1][1] * Te3
+ ionize_coeffs[i_v][1][0] * Te4);
rate2 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[1]) rate1 = exp(rate1);
if (b_exp[2]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][3]) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
f64 ppn_low = (ionize_temps[i_v][4] - Te) / (ionize_temps[i_v][4] - ionize_temps[i_v][3]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[1] %d b_exp[2] %d 3-4 -> 1-2 rate %1.8E coeffs[2][0] \n", i_v, Te,
(b_exp[1] ? 1 : 0), (b_exp[2] ? 1 : 0), rate, ionize_coeffs[i_v][2][0]);
} else {
if (Te < ionize_temps[i_v][5]) {
rate = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
if (b_exp[2]) rate = exp(rate);
printf("i_v %d Te %1.8E b_exp[2] %d b_exp[3] %d 4-5 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[2] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][6]) {
rate1 = (ionize_coeffs[i_v][2][4] + ionize_coeffs[i_v][2][3] * TeeV
+ ionize_coeffs[i_v][2][2] * Tesq + ionize_coeffs[i_v][2][1] * Te3
+ ionize_coeffs[i_v][2][0] * Te4);
rate2 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[2]) rate1 = exp(rate1);
if (b_exp[3]) rate2 = exp(rate2);
f64 ppn_high = (Te - ionize_temps[i_v][5]) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
f64 ppn_low = (ionize_temps[i_v][6] - Te) / (ionize_temps[i_v][6] - ionize_temps[i_v][5]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[2] %d b_exp[3] %d 5-6-> 2-3 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[2] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][7]) {
rate = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate = exp(rate); // it is always false anyway
printf("i_v %d Te %1.8E b_exp[3] %d b_exp[3] %d 6-7 rate %1.8E coeffs[3][0] \n", i_v, Te,
(b_exp[3] ? 1 : 0), (b_exp[3] ? 1 : 0), rate, ionize_coeffs[i_v][3][0]);
} else {
if (Te < ionize_temps[i_v][8]) {
rate1 = (ionize_coeffs[i_v][3][4] + ionize_coeffs[i_v][3][3] * TeeV
+ ionize_coeffs[i_v][3][2] * Tesq + ionize_coeffs[i_v][3][1] * Te3
+ ionize_coeffs[i_v][3][0] * Te4);
if (b_exp[3]) rate1 = exp(rate1); // it is always false anyway
rate2 = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate2 = exp(rate2); // it is always false anyway
f64 ppn_high = (Te - ionize_temps[i_v][7]) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
f64 ppn_low = (ionize_temps[i_v][8] - Te) / (ionize_temps[i_v][8] - ionize_temps[i_v][7]);
rate = rate1*ppn_low + rate2*ppn_high;
printf("i_v %d Te %1.8E b_exp[3] %d b_exp[4] %d 7-8 rate %1.8E coeffs[4][0] \n", i_v, Te,
(b_exp[3] ? 1 : 0), (b_exp[4] ? 1 : 0), rate, ionize_coeffs[i_v][4][0]);
} else {
rate = (ionize_coeffs[i_v][4][4] + ionize_coeffs[i_v][4][3] * TeeV
+ ionize_coeffs[i_v][4][2] * Tesq + ionize_coeffs[i_v][4][1] * Te3
+ ionize_coeffs[i_v][4][0] * Te4);
if (b_exp[4]) rate = exp(rate); // it is always false anyway
printf("i_v %d Te %1.8E b_exp[4] %d 8 -> 4 rate %1.8E coeffs[4][0] \n", i_v, Te,
(b_exp[4] ? 1 : 0), rate, ionize_coeffs[i_v][4][0]);
};
}
};
};
};
};
};
};
};
return rate;
}
__device__ f64 GetIonizationRates(f64 const Te, f64 const v, f64 * p_Recombo_rate)
{
int i_vleft, i_vright;
f64 vleft, vright;
f64 ppn_right, ppn_left;
if (v < 1.0e7) {
i_vleft = (int)(v / 2.0e6);
i_vright = i_vleft + 1;
vleft = 2.0e6*(double)(i_vleft);
vright = 2.0e6*(double)(i_vright); // at most 1e7
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
} else {
if (v > 2.7e8) {
i_vleft = 31;
i_vright = 31;
ppn_left = 1.0;
ppn_right = 0.0;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
} else {
			i_vleft = 4 + (int)(v / 1.0e7); // maps v onto column 4 + v/1e7 (clamped to 30 below)
if (i_vleft >= 31) i_vleft = 30;
i_vright = i_vleft + 1;
if (i_vright >= 32) i_vright = 31;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
};
};
f64 rate_left = GetIonizationRate_given_v(Te, i_vleft);
f64 rate_right = GetIonizationRate_given_v(Te, i_vright);
f64 recomb_rate_left = GetRecombinationRate_given_v(Te, i_vleft);
f64 recomb_rate_right = GetRecombinationRate_given_v(Te, i_vright);
// now we need to go again given which v column to another function!
f64 rate = rate_left*ppn_left + rate_right*ppn_right;
*p_Recombo_rate = recomb_rate_left*ppn_left + recomb_rate_right*ppn_right;
return rate;
}
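
// (Added worked example of the velocity interpolation above.) For v = 1.5e7 we take the middle
// branch: i_vleft = 4 + (int)(1.5e7/1e7) = 5, i_vright = 6, so vleft = 1e7, vright = 2e7 and
// ppn_left = ppn_right = 0.5; the returned rate is the equal-weighted blend of those two columns
// at this Te. For v < 1e7 the columns are spaced by 2e6 instead, and for v > 2.7e8 column 31 is
// used with full weight.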
__device__ f64 GetIonizationRatesDebug(f64 const Te, f64 const v, f64 * p_Recombo_rate)
{
int i_vleft, i_vright;
f64 vleft, vright;
f64 ppn_right, ppn_left;
if (v < 1.0e7) {
i_vleft = (int)(v / 2.0e6);
i_vright = i_vleft + 1;
vleft = 2.0e6*(double)(i_vleft);
vright = 2.0e6*(double)(i_vright); // at most 1e7
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
printf("GIRD small v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
}
else {
if (v > 2.7e8) {
i_vleft = 31;
i_vright = 31;
ppn_left = 1.0;
ppn_right = 0.0;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
printf("GIRD high v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
}
else {
			i_vleft = 4 + (int)(v / 1.0e7); // maps v onto column 4 + v/1e7 (clamped to 30 below)
if (i_vleft >= 31) i_vleft = 30;
i_vright = i_vleft + 1;
vleft = 1.0e7*(double)(i_vleft - 4);
vright = 1.0e7*(double)(i_vright - 4); // careful
ppn_right = (v - vleft) / (vright - vleft);
ppn_left = (vright - v) / (vright - vleft);
printf("GIRD moderate v: vleft %1.8E vright %1.8E ppn %1.8E %1.8E \n",
vleft, vright, ppn_left, ppn_right);
};
};
f64 rate_left = GetIonizationRate_given_v_Debug(Te, i_vleft);
f64 rate_right = GetIonizationRate_given_v_Debug(Te, i_vright);
f64 recomb_rate_left = GetRecombinationRate_given_v(Te, i_vleft);
f64 recomb_rate_right = GetRecombinationRate_given_v(Te, i_vright);
printf("GIRD : rate_left %1.8E rate_right %1.8E \n", rate_left, rate_right);
// now we need to go again given which v column to another function!
f64 rate = rate_left*ppn_left + rate_right*ppn_right;
*p_Recombo_rate = recomb_rate_left*ppn_left + recomb_rate_right*ppn_right;
return rate;
}
__global__ void kernelCompare(
f64_vec2 * __restrict__ p_epsxy,
f64 * __restrict__ p_epsiz,
f64 * __restrict__ p_epsez,
f64_vec2 * __restrict__ p_epsxyp,
f64 * __restrict__ p_epsizp,
f64 * __restrict__ p_epsezp,
f64 * __restrict__ p_distance
)
{
long const index = blockDim.x*blockIdx.x + threadIdx.x;
	f64_vec2 eps1 = p_epsxy[index];
	f64_vec2 eps2 = p_epsxyp[index];
	f64 epsez = p_epsez[index];
	f64 epsezp = p_epsezp[index];
	// Only the ez component is compared here; the xy and iz comparisons are kept commented out below.
	f64 diff = epsez - epsezp;
if (index == CHOSEN) printf("%d epsez %1.14E pred %1.14E diff %1.8E ppn %1.4E\n",
index, epsez, epsezp, diff, diff/epsez);
p_distance[index] = fabs(diff/(fabs(epsez)+1.0e+2));
/*if ((fabs(diff) > 1.0e-10) && (fabs(diff) > fabs(1.0e-8*eps2.x))) printf("%d x dimension : %1.10E %1.10E\n", index,
eps1.x, eps2.x);
diff = eps1.y - eps2.y;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(eps2.y))) printf("%d y dimension : %1.10E %1.10E \n", index,
eps1.y, eps2.y);
f64 epsiz = p_epsiz[index];
f64 epsizp = p_epsizp[index];
diff = epsiz - epsizp;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(epsizp))) printf("%d iz : %1.10E %1.10E \n", index,
epsiz, epsizp);
f64 epsez = p_epsez[index];
f64 epsezp = p_epsezp[index];
diff = epsez - epsezp;
if ((fabs(diff) > 1.0e-10) && (fabs(diff) > 1.0e-8*fabs(epsezp))) printf("%d ez : %1.10E %1.10E \n", index,
epsez, epsezp);*/
// we do not want it to null out the routine
p_epsxy[index] = eps1;
p_epsez[index] = epsez;
}
__global__ void kernelSplitIntoSeedRegressors
(
v4 * __restrict__ p_move,
f64_vec3 * __restrict__ p_regr_i,
f64_vec3 * __restrict__ p_regr_e,
f64_vec2 * __restrict__ p_epsxy
) {
long const iMinor = threadIdx.x + blockIdx.x*blockDim.x;
v4 v = p_move[iMinor];
// f64_vec2 eps = p_epsxy[iMinor];
// leave epsilon out for now - would serve as multiplying factor.
f64_vec3 regr_i, regr_e;
regr_e.x = 0.0; regr_e.y = 0.0;
regr_i.x = v.vxy.x; regr_i.y = v.vxy.y;
regr_i.z = v.viz;
regr_e.z = v.vez;
p_regr_i[iMinor] = regr_i;
p_regr_e[iMinor] = regr_e;
}
__global__ void kernelCalcJacobi_Viscosity(
structural * __restrict__ p_info_minor,
f64_vec2 * __restrict__ p_epsilon_xy,
f64 * __restrict__ p_epsilon_iz,
f64 * __restrict__ p_epsilon_ez,
f64_tens3 * __restrict__ p_matrix_i,
f64_tens3 * __restrict__ p_matrix_e, // inverted matrix R^-1 so Jacobi = R^-1 epsilon
f64_vec3 * __restrict__ p_Jacobi_i,
f64_vec3 * __restrict__ p_Jacobi_e)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
structural info = p_info_minor[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
f64_vec2 epsilon_xy = p_epsilon_xy[iMinor];
f64 epsilon_iz = p_epsilon_iz[iMinor];
f64 epsilon_ez = p_epsilon_ez[iMinor];
f64_tens3 matrix = p_matrix_i[iMinor];
p_Jacobi_i[iMinor] = matrix*Make3(epsilon_xy, epsilon_iz);
matrix = p_matrix_e[iMinor];
p_Jacobi_e[iMinor] = matrix*Make3(epsilon_xy, epsilon_ez);
} else {
memset(&(p_Jacobi_i[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_Jacobi_e[iMinor]), 0, sizeof(f64_vec3));
}
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
f64_vec2 epsilon_xy = p_epsilon_xy[iVertex + BEGINNING_OF_CENTRAL];
f64 epsilon_iz = p_epsilon_iz[iVertex + BEGINNING_OF_CENTRAL];
f64 epsilon_ez = p_epsilon_ez[iVertex + BEGINNING_OF_CENTRAL];
f64_tens3 matrix = p_matrix_i[iVertex + BEGINNING_OF_CENTRAL];
p_Jacobi_i[iVertex + BEGINNING_OF_CENTRAL] = matrix*Make3(epsilon_xy, epsilon_iz);
matrix = p_matrix_e[iVertex + BEGINNING_OF_CENTRAL];
p_Jacobi_e[iVertex + BEGINNING_OF_CENTRAL] = matrix*Make3(epsilon_xy, epsilon_ez);
} else {
memset(&(p_Jacobi_i[iVertex + BEGINNING_OF_CENTRAL]), 0, sizeof(f64_vec3));
memset(&(p_Jacobi_e[iVertex + BEGINNING_OF_CENTRAL]), 0, sizeof(f64_vec3));
};
}
}
__global__ void kernelCalc_Matrices_for_Jacobi_Viscosity(
f64 const hsub,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_elec_minor, // nT / nu ready to look up
f64_vec3 * __restrict__ p_B_minor,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64_tens3 * __restrict__ p_matrix_i,
f64_tens3 * __restrict__ p_matrix_e
)
{
//__shared__ v4 shared_vie[threadsPerTileMinor]; // sort of thing we want as input
// Not used, right? Nothing nonlinear?
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
//__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// 4+2+2+1+1 *1.5 = 15 per thread. That is possibly as slow as having 24 per thread.
// Thus putting some stuff in shared may speed up if there are spills.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64_vec2 opppos, prevpos, nextpos;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX))
{
// memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
// memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
f64_vec2 cc0, cc1;
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if (info.flag == DOMAIN_VERTEX)
//|| (info.flag == OUTERMOST)) // !!!!!!!!!!!!!!!!
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
// d_eps_z_by_d_viz = 1.0; // Note that eps includes v_k+1
if (shared_ita_par_verts[threadIdx.x] > 0.0) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// ** Be especially vigilant to the changes we need to make to go from ion to electron.
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
// Now sort out anticlock vars:
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//gradvy.y = -0.5*(
// (our_v.vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
// + (prev_v.vxy.y + our_v.vxy.y)*(prevpos.x - info.pos.x)
// + (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
// + (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
// ) / area_quadrilateral;
//
// so we want to know, eps += U v_self for U 4x4
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
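				// (Added note) These are the coefficients multiplying THIS vertex's own v_j in the
				// Green-Gauss estimates of dvj/dx and dvj/dy over the quadrilateral (prev, opp, next, self)
				// -- cf. the commented-out gradvy.y stencil above. The Jacobian being assembled here only
				// needs d(eps)/d(v_self), so the neighbour coefficients are not formed.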
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
{
f64_vec2 opp_B;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
f64 ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
f64 nu_theirs = p_nu_ion_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (ita_par > 0.0) {
Augment_Jacobean(&J,
hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n*p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_ion),
edge_normal, ita_par, nu, omega_ci,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
};
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
}; // ita_par > 0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_i[iVertex + BEGINNING_OF_CENTRAL]), &result, sizeof(f64_tens3));
// inverted it so that we are ready to put Jacobi = result.eps
} else {
// NOT domain vertex: Do nothing
// NOTE: We did not include OUTERMOST. Justification / effect ??
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
//if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
{
long izNeighMinor[6];
char szPBC[6];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
			f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par[threadIdx.x] > 0.0) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// (Note: an array of the 6 neighbour n values could be preloaded here, but it is not actually used in this routine.)
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// nu = 1.0e10; // DEBUG
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // use the nu corresponding to whichever ita was chosen
}
else {
ita_par = ita_par_opp;
nu = nu_theirs; // We take the MINIMUM ita of the two sides (and its matching nu).
// Whether taking the minimum is the right choice is questionable and may be revisited.
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
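// For a CROSSING_INS triangle whose neighbour is also CROSSING_INS, prev or next may lie inside
// the insulator; in that case the quadrilateral collapses and the self-coefficients are recomputed
// below over the triangle formed by the three remaining positions.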
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// can't do prev_v == 0.0
// have to see if prev pos inside ins.
if (prevpos.dot(prevpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // prev is in the insulator.
{
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
grad_vjdx_coeff_on_vj_self = 0.5*((info.pos.y - nextpos.y)+ (opppos.y - info.pos.y)) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*((info.pos.x - nextpos.x)+ (opppos.x - info.pos.x)) / area_triangle;
}
else {
if (nextpos.dot(nextpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // next is in the insulator.
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
grad_vjdx_coeff_on_vj_self = 0.5*(
(prevpos.y - info.pos.y)
+ (info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*(
(prevpos.x - info.pos.x)
+ (info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
};
};
};
};
Augment_Jacobean(&J,
hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_ion),
edge_normal, ita_par, nu, omega_ci,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
}
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
}; // ita_par > 0.0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_i[iMinor]), &result, sizeof(f64_tens3));
} else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
}
else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len; // for a vertex, the number of surrounding triangles equals the number of neighbours
if ((info.flag == DOMAIN_VERTEX))
//|| (info.flag == OUTERMOST))
{
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par_verts[threadIdx.x] > 0.0) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// All same as ion here:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qovermc (electron), not qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (ita_par > 0.0)
Augment_Jacobean(&J,
hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_e),
edge_normal, ita_par, nu, omega_ce,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
}; // ita_par > 0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_e[iVertex + BEGINNING_OF_CENTRAL]), &result, sizeof(f64_tens3));
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_tens3 J; // Jacobian
memset(&J, 0, sizeof(f64_tens3));
//d_eps_x_by_d_vx = 1.0;
J.xx = 1.0;
J.yy = 1.0;
J.zz = 1.0;
if (shared_ita_par[threadIdx.x] > 0.0) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// (Note: an array of the 6 neighbour n values could be preloaded here, but it is not actually used in this routine.)
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE: qovermc (electron), not qoverMc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64 grad_vjdx_coeff_on_vj_self = 0.5*(prevpos.y - nextpos.y) / area_quadrilateral;
f64 grad_vjdy_coeff_on_vj_self = 0.5*(nextpos.x - prevpos.x) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// can't do prev_v == 0.0
// have to see if prev pos inside ins.
if (prevpos.dot(prevpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // prev is in the insulator.
{
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
grad_vjdx_coeff_on_vj_self = 0.5*((info.pos.y - nextpos.y) + (opppos.y - info.pos.y)) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*((info.pos.x - nextpos.x) + (opppos.x - info.pos.x)) / area_triangle;
}
else {
if (nextpos.dot(nextpos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER) // next is in the insulator.
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
grad_vjdx_coeff_on_vj_self = 0.5*(
(prevpos.y - info.pos.y)
+ (info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
grad_vjdy_coeff_on_vj_self = -0.5*(
(prevpos.x - info.pos.x)
+ (info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
};
};
};
};
Augment_Jacobean(&J,
hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_e),
edge_normal, ita_par, nu, omega_ce,
grad_vjdx_coeff_on_vj_self,
grad_vjdy_coeff_on_vj_self
);
};
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
}; // ita_par > 0.0
f64_tens3 result;
J.Inverse(result);
memcpy(&(p_matrix_e[iMinor]), &result, sizeof(f64_tens3));
}
else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
//
//__global__ void kernelCollectIntegralsMajorCells_FromSrcSyst(
//
// structural * __restrict__ p_info,
// long * __restrict__ p__triguess, // guess at tri where the point lies
// LONG3 * __restrict__ p_tri_corner_index, // guess at tri where the point lies
// LONG3 * __restrict__ p_tri_neigh_index, // guess at tri where the point lies
//
// bool * __restrict__ p__b_moved,
// Shardmodel * __restrict__ p_shards, // data to integrate
// )
//{
// // Think carefully about how this is to be done.
// // We need to send a chuffload of data to GPU in order to run this on GPU.
// // It can be done though.
//
// long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iMinor OF VERTEX
//
// // Target vertex where we do integrals.
// // This is for n and T.
//
// // We want a slim routine which stores the correct tri location when it is found, AND, only integrates mass.
//
// // What about when we come to integrate momentum on minors? Separate routine but still use linear model of nv on shards.
//
// // Defy all.
// long Src_start_point;
// if (p__b_moved[iVertex]) {
//
// // 1. Find src triangle containing dest point.
//
// // Get corner positions --> which half-planes is it outside? Can we end up scrolling tris indefinitely? iirc yes
//
// . Pick closest rotated image of dest, to move towards. If we keep moving triangles then we will land on the other side of the PB.
//
// . If we are in more than 1 clipping half-plane, choose the direction where dest is farther from the clip line in the orthogonal direction.
//
// // 2. Having found triangle where it lives, identify which corner to use : it must be true that we are within the shards of at least one of the
// // corners of the containing triangle.
//
// // Set Src_start_point. Be prepared to hit no intersection.
//
// } else {
// // it is co-located with previous position; so work outwards from the one that we know maps.
//
// Src_start_point = iVertex;
// }
//
// // Carry on and seek intersection in each of the neighbours.
//
// // We come unstuck because we cannot store a long list of additional places we must visit to accumulate integral.
//
//
//
//
//
// // We found somewhere with nonzero intersection.
//
// cpDest.GetIntersectionWithTriangle(cpIntersection);
//
// cpIntersection.IntegrateMass(shard corners, shard values of n, &result);
//
// Area_accum += cpIntersection.GetArea(); // We stop when this is 100% of total.
//
// mass_integral += result;
//
//
// if (Area_accum > 0.9999999*Total_dest_area) // we got em!
// {
// // can save off the accumulated sum of mass in the dest
//
// n = accum_mass / Area_accum;
// write to global memory.
// } else {
// // keep looking , but how?
//
//
//
// };
//
//	Doing this on the GPU is actually too difficult: we cannot build a dynamic list of cells still to visit.
//}
//
__global__ void kernelAverage_n_T_x_to_tris(
nvals * __restrict__ p_n_minor,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_minor,
structural * __restrict__ p_info,
f64_vec2 * __restrict__ p_cc,
LONG3 * __restrict__ p_tri_corner_index,
CHAR4 * __restrict__ p_tri_periodic_corner_flags,
bool bCalculateOnCircumcenters
)
{
__shared__ nvals shared_n[threadsPerTileMajor];
__shared__ T3 shared_T[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos[threadsPerTileMajor];
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // iMinor OF VERTEX
if (threadIdx.x < threadsPerTileMajor)
{
long getindex = blockIdx.x * threadsPerTileMajor + threadIdx.x;
shared_n[threadIdx.x] = p_n_major[getindex];
shared_T[threadIdx.x] = p_T_minor[BEGINNING_OF_CENTRAL + getindex];
shared_pos[threadIdx.x] = p_info[BEGINNING_OF_CENTRAL + getindex].pos;
};
long const StartMajor = blockIdx.x*threadsPerTileMajor; // vertex iMinor
long const EndMajor = StartMajor + threadsPerTileMajor;
LONG3 const tri_corner_index = p_tri_corner_index[iMinor];
CHAR4 const tri_corner_per_flag = p_tri_periodic_corner_flags[iMinor];
structural info = p_info[iMinor];
__syncthreads();
T3 T(0.0, 0.0, 0.0);
nvals n(0.0, 0.0);
f64_vec2 pos(0.0, 0.0);
f64_vec2 cc(0.0, 0.0);
// New plan for this routine: go through position code for all cases except frills.
// Then compute averaging coefficients for domain and crossing_ins, and use them.
//
n.n = 0.0;
n.n_n = 0.0;
T.Te = 0.0; T.Ti = 0.0; T.Tn = 0.0;
f64_vec2 poscorner0, poscorner1, poscorner2;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
poscorner0 = shared_pos[tri_corner_index.i1 - StartMajor];
} else {
poscorner0 = p_info[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per0 == ROTATE_ME_CLOCKWISE) poscorner0 = Clockwise_d*poscorner0;
if (tri_corner_per_flag.per0 == ROTATE_ME_ANTICLOCKWISE) poscorner0 = Anticlockwise_d*poscorner0;
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
poscorner1 = shared_pos[tri_corner_index.i2 - StartMajor];
} else {
poscorner1 = p_info[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per1 == ROTATE_ME_CLOCKWISE) poscorner1 = Clockwise_d*poscorner1;
if (tri_corner_per_flag.per1 == ROTATE_ME_ANTICLOCKWISE) poscorner1 = Anticlockwise_d*poscorner1;
if ((info.flag != INNER_FRILL) && (info.flag != OUTER_FRILL))
{
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
poscorner2 = shared_pos[tri_corner_index.i3 - StartMajor];
} else {
poscorner2 = p_info[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].pos;
};
if (tri_corner_per_flag.per2 == ROTATE_ME_CLOCKWISE) poscorner2 = Clockwise_d*poscorner2;
if (tri_corner_per_flag.per2 == ROTATE_ME_ANTICLOCKWISE) poscorner2 = Anticlockwise_d*poscorner2;
f64_vec2 Bb = poscorner1 - poscorner0;
f64_vec2 C = poscorner2 - poscorner0;
f64 D = 2.0*(Bb.x*C.y - Bb.y*C.x);
f64 modB = Bb.x*Bb.x + Bb.y*Bb.y;
f64 modC = C.x*C.x + C.y*C.y;
cc.x = (C.y*modB - Bb.y*modC) / D + poscorner0.x;
cc.y = (Bb.x*modC - C.x*modB) / D + poscorner0.y;
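// Standard circumcenter formula: with B = corner1 - corner0 and C = corner2 - corner0,
// cc - corner0 = ( C.y*|B|^2 - B.y*|C|^2 , B.x*|C|^2 - C.x*|B|^2 ) / ( 2*(B.x*C.y - B.y*C.x) ).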
pos = THIRD*(poscorner1 + poscorner0 + poscorner2);
// For CROSSING_INS triangles, always project the circumcenter to the insulator radius:
if ((info.flag == CROSSING_INS))
{
f64_vec2 cc2 = cc;
cc2.project_to_radius(cc, DEVICE_RADIUS_INSULATOR_OUTER);
};
// If cc lies outside the triangle, move it towards pos (the centroid) until it is inside:
// take cc - poscorner0 and look at its component perpendicular to the edge poscorner1-poscorner2.
// If that exceeds the corresponding component of a corner-to-opposite-edge vector, cc is beyond that
// edge and must be moved towards pos. How far? The component changes linearly along the segment from
// cc to pos, so interpolate using the component computed for pos. Then repeat for the other two edges.
f64_vec2 minus = cc - poscorner0;
f64_vec2 edgenormal;
edgenormal.x = poscorner2.y - poscorner1.y;
edgenormal.y = poscorner1.x - poscorner2.x;
// Corners 0,1,2 are anticlockwise, so with edgenormal.x = y2 - y1 the normal points outward.
f64 edgemod = edgenormal.modulus();
edgenormal /= edgemod;
f64 dist = minus.dot(edgenormal);
f64 dist2 = (poscorner2 - poscorner0).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner0).dot(edgenormal);
// dist2 = lambda*dist3 + (1-lambda) dist
// lambda = (dist2-dist) / (dist3-dist)
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
minus = cc - poscorner2;
edgenormal.x = poscorner1.y - poscorner0.y;
edgenormal.y = poscorner0.x - poscorner1.x;
edgemod = edgenormal.modulus();
edgenormal /= edgemod;
dist = minus.dot(edgenormal);
dist2 = (poscorner0 - poscorner2).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner2).dot(edgenormal);
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
minus = cc - poscorner1;
edgenormal.x = poscorner0.y - poscorner2.y;
edgenormal.y = poscorner2.x - poscorner0.x;
edgemod = edgenormal.modulus();
edgenormal /= edgemod;
dist = minus.dot(edgenormal);
dist2 = (poscorner0 - poscorner1).dot(edgenormal);
if (dist > dist2) {
f64 dist3 = (pos - poscorner1).dot(edgenormal);
cc.x += ((dist2 - dist) / (dist3 - dist))*(pos.x - cc.x);
cc.y += ((dist2 - dist) / (dist3 - dist))*(pos.y - cc.y);
}
} else {
// FRILL
pos = 0.5*(poscorner1 + poscorner0);
f64_vec2 pos2 = pos;
if (info.flag == INNER_FRILL) {
pos2.project_to_radius(pos, FRILL_CENTROID_INNER_RADIUS_d);
} else {
pos2.project_to_radius(pos, FRILL_CENTROID_OUTER_RADIUS_d);
};
cc = pos;
}
// Now set up averaging coefficients and set n,T.
// For outer frills n and T thus remain at 0.
// The circumcenter is equidistant from the corners, so 1/3 weights would still be a reasonable average,
// but linear interpolation is preferred here: this gives a point estimate of n; the cell masses
// themselves are stored in the vertcells.
if (info.flag == DOMAIN_TRIANGLE) {
f64 lambda1, lambda2, lambda3;
if (bCalculateOnCircumcenters) {
f64_vec2 x0 = poscorner0, x1 = poscorner1, x2 = poscorner2;
f64_vec2 a1, a2;
f64 b1, b2;
// a1.x = (x1.y - x2.y) / ((x0.x - x2.x)*(x1.y - x2.y) - (x1.x - x2.x)*(x0.y - x2.y));
// a1.y = (x2.x - x1.x) / ((x0.x - x2.x)*(x1.y - x2.y) - (x1.x - x2.x)*(x0.y - x2.y));
// b1 = -a1.x*x2.x - a1.y*x2.y;
// a2.x = (x0.y - x2.y) / ((x1.x - x2.x)*(x0.y - x2.y) - (x1.y - x2.y)*(x0.x - x2.x));
// a2.y = (x2.x - x0.x) / ((x1.x - x2.x)*(x0.y - x2.y) - (x1.y - x2.y)*(x0.x - x2.x));
// b2 = -a2.x*x2.x - a2.y*x2.y;
// lambda1 = a1.x*cc.x + a1.y*cc.y + b1;
// lambda2 = a2.x*cc.x + a2.y*cc.y + b2;
// lambda3 = 1.0 - lambda1 - lambda2;
// The commented-out form above gave lambda3 < 0 even when the point was well inside the triangle,
// so use the standard barycentric-coordinate formula instead:
lambda1 = ((x1.y - x2.y)*(cc.x - x2.x) + (x2.x - x1.x)*(cc.y - x2.y)) /
((x1.y - x2.y)*(x0.x - x2.x) + (x2.x - x1.x)*(x0.y - x2.y));
lambda2 = ((x2.y - x0.y)*(cc.x - x2.x) + (x0.x - x2.x)*(cc.y - x2.y)) /
((x1.y - x2.y)*(x0.x - x2.x) + (x2.x - x1.x)*(x0.y - x2.y));
lambda3 = 1.0 - lambda1 - lambda2;
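// These are the standard barycentric coordinates of cc in triangle (x0, x1, x2): the shared
// denominator is twice the signed triangle area and lambda1 + lambda2 + lambda3 = 1 by construction.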
} else {
lambda1 = THIRD;
lambda2 = THIRD;
lambda3 = THIRD;
};
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
n += lambda1*shared_n[tri_corner_index.i1 - StartMajor];
T += lambda1*shared_T[tri_corner_index.i1 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn, tri_corner_index.i1);
} else {
n += lambda1*p_n_major[tri_corner_index.i1];
T += lambda1*p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
n += lambda2*shared_n[tri_corner_index.i2 - StartMajor];
T += lambda2*shared_T[tri_corner_index.i2 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn, tri_corner_index.i2);
}
else {
n += lambda2*p_n_major[tri_corner_index.i2];
T += lambda2*p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
n += lambda3*shared_n[tri_corner_index.i3 - StartMajor];
T += lambda3*shared_T[tri_corner_index.i3 - StartMajor];
if (TESTTRI) printf("%d sharedvers n %1.10E contribnn %1.10E Tn %1.12E %d\n",
iMinor, n.n, shared_n[tri_corner_index.i3 - StartMajor].n_n,
shared_T[tri_corner_index.i3 - StartMajor].Tn, tri_corner_index.i3);
}
else {
n += lambda3*p_n_major[tri_corner_index.i3];
T += lambda3*p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
if (TESTTRI) printf("%d loadvers n %1.10E contribnn %1.10E Tn %1.12E\n",
iMinor, n.n, p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
if (TESTTRI)
printf("%d: lambda %1.10E %1.10E %1.10E\ncorner n %1.10E %1.10E %1.10E\n"
"cc %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E \n"
"indexcorner %d %d %d result nn= %1.10E Tn %1.12E \n\n",
iMinor, lambda1, lambda2, lambda3,
p_n_major[tri_corner_index.i1].n,
p_n_major[tri_corner_index.i2].n,
p_n_major[tri_corner_index.i3].n,
cc.x,cc.y, poscorner0.x, poscorner0.y, poscorner1.x, poscorner1.y, poscorner2.x, poscorner2.y,
tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
n.n_n, T.Tn
);
}
else {
// What else?
if (info.flag == CROSSING_INS)
{
int iAbove = 0;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
if (poscorner0.dot(poscorner0) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i1 - StartMajor];
T += shared_T[tri_corner_index.i1 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn);
};
}
else {
if (poscorner0.dot(poscorner0) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i1];
T += p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
}
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
if (poscorner1.dot(poscorner1) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i2 - StartMajor];
T += shared_T[tri_corner_index.i2 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn);
};
}
else {
if (poscorner1.dot(poscorner1) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i2];
T += p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
if (poscorner2.dot(poscorner2) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += shared_n[tri_corner_index.i3 - StartMajor];
T += shared_T[tri_corner_index.i3 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
shared_n[tri_corner_index.i3 - StartMajor].n_n,
shared_T[tri_corner_index.i3 - StartMajor].Tn);
};
}
else {
if (poscorner2.dot(poscorner2) > DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
n += p_n_major[tri_corner_index.i3];
T += p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d INS tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
};
#ifdef PROJECT_TO_INS_ALWAYS
f64_vec2 pos2 = pos;
pos2.project_to_radius(pos, DEVICE_RADIUS_INSULATOR_OUTER);
#else
if (pos.dot(pos) < DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
f64_vec2 pos2 = pos;
pos2.project_to_radius(pos, DEVICE_RADIUS_INSULATOR_OUTER);
};
// project only if below insulator.!
#endif
f64 divide = 1.0 / (f64)iAbove;
n.n *= divide;
n.n_n *= divide;
T.Tn *= divide;
T.Ti *= divide;
T.Te *= divide;
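// Note: this assumes iAbove >= 1, i.e. at least one corner of a CROSSING_INS triangle lies
// outside the insulator radius; otherwise the divisions above would be by zero.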
if (TESTTRI)
printf("%d INS tri: iAbove %d Tn divided: %1.14E\n", iMinor, iAbove, T.Tn);
} else {
if (info.flag == CROSSING_CATH)
{
int iAbove = 0;
if ((tri_corner_index.i1 >= StartMajor) && (tri_corner_index.i1 < EndMajor))
{
if (poscorner0.x*poscorner0.x+(poscorner0.y-CATHODE_ROD_R_POSITION)*(poscorner0.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i1 - StartMajor];
T += shared_T[tri_corner_index.i1 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
shared_n[tri_corner_index.i1 - StartMajor].n_n,
shared_T[tri_corner_index.i1 - StartMajor].Tn);
};
} else {
if (poscorner0.x*poscorner0.x + (poscorner0.y - CATHODE_ROD_R_POSITION)*(poscorner0.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i1];
T += p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i1,
p_n_major[tri_corner_index.i1].n_n,
p_T_minor[tri_corner_index.i1 + BEGINNING_OF_CENTRAL].Tn);
}
};
if ((tri_corner_index.i2 >= StartMajor) && (tri_corner_index.i2 < EndMajor))
{
if (poscorner1.x*poscorner1.x + (poscorner1.y - CATHODE_ROD_R_POSITION)*(poscorner1.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i2 - StartMajor];
T += shared_T[tri_corner_index.i2 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
shared_n[tri_corner_index.i2 - StartMajor].n_n,
shared_T[tri_corner_index.i2 - StartMajor].Tn);
};
} else {
if (poscorner1.x*poscorner1.x + (poscorner1.y - CATHODE_ROD_R_POSITION)*(poscorner1.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i2];
T += p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i2,
p_n_major[tri_corner_index.i2].n_n,
p_T_minor[tri_corner_index.i2 + BEGINNING_OF_CENTRAL].Tn);
};
};
if ((tri_corner_index.i3 >= StartMajor) && (tri_corner_index.i3 < EndMajor))
{
if (poscorner2.x*poscorner2.x + (poscorner2.y - CATHODE_ROD_R_POSITION)*(poscorner2.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += shared_n[tri_corner_index.i3 - StartMajor];
T += shared_T[tri_corner_index.i3 - StartMajor];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
shared_n[tri_corner_index.i3 - StartMajor].n_n,
shared_T[tri_corner_index.i3 - StartMajor].Tn);
};
}
else {
if (poscorner2.x*poscorner2.x + (poscorner2.y - CATHODE_ROD_R_POSITION)*(poscorner2.y - CATHODE_ROD_R_POSITION) > CATHODE_ROD_RADIUS*CATHODE_ROD_RADIUS)
{
n += p_n_major[tri_corner_index.i3];
T += p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL];
iAbove++;
if (TESTTRI)
printf("%d CATH tri: vertex %d nn %1.12E Tn %1.12E\n",
iMinor, tri_corner_index.i3,
p_n_major[tri_corner_index.i3].n_n,
p_T_minor[tri_corner_index.i3 + BEGINNING_OF_CENTRAL].Tn);
};
};
f64_vec2 pos2 = pos;
// Projecting on to the circle of radius CATHODE_ROD_RADIUS about (0.0, CATHODE_ROD_R_POSITION)
// was considered:
// pos2.y -= CATHODE_ROD_R_POSITION;
// pos2.project_to_radius(pos, CATHODE_ROD_RADIUS);
// pos.y += CATHODE_ROD_R_POSITION;
// Decision: do not project in the cathode case.
f64 divide = 1.0 / (f64)iAbove;
n.n *= divide;
n.n_n *= divide;
T.Tn *= divide;
T.Ti *= divide;
T.Te *= divide;
if (TESTTRI)
printf("%d CATH tri: iAbove %d Tn divided: %1.14E\n", iMinor, iAbove, T.Tn);
} else {
// Cool neutrals in frill:
if (info.flag == OUTER_FRILL) {
n.n = INITIAL_BACKGROUND_ION_DENSITY;
n.n_n = INITIAL_TOTAL_DENSITY - INITIAL_BACKGROUND_ION_DENSITY;
T.Te = 4.0e-14; T.Ti = 4.0e-14; T.Tn = 4.0e-14;
} else {
// inner frill? out of domain?
n.n = 0.0;
n.n_n = 0.0;
T.Te = 0.0; T.Ti = 0.0; T.Tn = 0.0;
}
}
};
// Outer frills it is thus set to n=0,T=0.
};
if (TESTTRI) printf("\n%d flag %d Tn %1.12E info.pos.x %1.9E cc.x %1.9E \n", iMinor, info.flag, T.Tn, pos.x, cc.x);
p_n_minor[iMinor] = n;
p_T_minor[iMinor] = T;
info.pos = pos;
p_info[iMinor] = info;
p_cc[iMinor] = cc;
}
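// A minimal host-side launch sketch (illustration only): one thread per minor cell, one block per
// minor tile. The grid count "numTilesMinor" is hypothetical here - use whatever tile count the rest
// of the code uses for per-minor kernels - and the device pointers are likewise assumed to be allocated.
//
//	kernelAverage_n_T_x_to_tris <<< numTilesMinor, threadsPerTileMinor >>> (
//		p_n_minor, p_n_major, p_T_minor, p_info, p_cc,
//		p_tri_corner_index, p_tri_periodic_corner_flags,
//		true); // bCalculateOnCircumcenters: interpolate with barycentric weights at the circumcenter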
__global__ void kernelCreateShardModelOfDensities_And_SetMajorArea(
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_major,
nvals * __restrict__ p_n_minor,
long * __restrict__ p_izTri_vert,
char * __restrict__ p_szPBCtri_vert,
f64_vec2 * __restrict__ p_cc,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_n_shards,
// long * __restrict__ Tri_n_lists,
// long * __restrict__ Tri_n_n_lists ,
f64 * __restrict__ p_AreaMajor,
bool bUseCircumcenter
)// sets n_shards_n, n_shards, Tri_n_n_lists, Tri_n_lists
{
// called for major tile
// Interpolation to Tri_n_lists, Tri_n_n_lists is not yet implemented. But this would be output.
// Inputs:
// n, pTri->cent, izTri, pTri->periodic, pVertex->pos
// Outputs:
// pVertex->AreaCell
// n_shards[iVertex]
// Tri_n_n_lists[izTri[i]][o1 * 2] <--- 0 if not set by domain vertex
// CALL AVERAGE OF n TO TRIANGLES - WANT QUADRATIC AVERAGE - BEFORE WE BEGIN
// MUST ALSO POPULATE pVertex->AreaCell with major cell area
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ nvals shared_n[threadsPerTileMinor];
// Here 4 doubles/minor. In 16*1024, 4 double*8 bytes*512 minor. 256 major.
// Choosing to store n_n while doing n which is not necessary.
ShardModel n_; // to be populated
int iNeigh, tri_len;
f64 N_n, N, interpolated_n, interpolated_n_n;
long i, inext, o1, o2;
//memset(Tri_n_n_lists, 0, sizeof(f64)*NUMTRIANGLES * 6);
//memset(Tri_n_lists, 0, sizeof(f64)*NUMTRIANGLES * 6);
// We can afford to stick 6-8 doubles in shared. 8 vars*8 bytes*256 threads = 16*1024.
if (bUseCircumcenter == false)
{
structural info2[2];
memcpy(info2, p_info_minor + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info2[0].pos;
shared_pos[2 * threadIdx.x + 1] = info2[1].pos;
}
else {
memcpy(&(shared_pos[2 * threadIdx.x]), p_cc + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(f64_vec2) * 2);
}
memcpy(&(shared_n[2 * threadIdx.x]), p_n_minor + blockIdx.x*threadsPerTileMinor + 2 * threadIdx.x, sizeof(nvals) * 2);
__syncthreads();
long const StartMinor = blockIdx.x*threadsPerTileMinor; // vertex index
long const EndMinor = StartMinor + threadsPerTileMinor;
// To fit in Tri_n_n_lists stuff we should first let coeff[] go out of scope.
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[BEGINNING_OF_CENTRAL + iVertex];
if (info.flag == DOMAIN_VERTEX) {
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
f64 coeff[MAXNEIGH]; // total 21*12 = 252 bytes. 256 max for 192 threads.
f64 ndesire0, ndesire1;
f64_vec2 pos0, pos1;
memcpy(izTri, p_izTri_vert + MAXNEIGH_d*iVertex, sizeof(long)*MAXNEIGH_d);
memcpy(szPBC, p_szPBCtri_vert + MAXNEIGH_d*iVertex, sizeof(char)*MAXNEIGH_d);
f64 n_avg = p_n_major[iVertex].n;
// NB: this reads the major-cell value; it is unclear why an earlier version read p_n_minor here instead.
if ((izTri[0] >= StartMinor) && (izTri[0] < EndMinor)) {
pos0 = shared_pos[izTri[0] - StartMinor];
ndesire0 = shared_n[izTri[0] - StartMinor].n;
}
else {
if (bUseCircumcenter) {
pos0 = p_cc[izTri[0]];
}
else {
pos0 = p_info_minor[izTri[0]].pos;
} // there exists a more elegant way than this!!!
ndesire0 = p_n_minor[izTri[0]].n;
}
if (szPBC[0] == ROTATE_ME_CLOCKWISE) pos0 = Clockwise_d*pos0;
if (szPBC[0] == ROTATE_ME_ANTICLOCKWISE) pos0 = Anticlockwise_d*pos0;
f64 tri_area;
f64 N0 = 0.0; f64 coeffcent = 0.0;
memset(coeff, 0, sizeof(f64)*MAXNEIGH_d);
short i;
f64 AreaMajor = 0.0;
f64 high_n = ndesire0;
f64 low_n = ndesire0;
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
{
// Temporary setting:
n_.n[i] = ndesire0;
inext = i + 1; if (inext == info.neigh_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor)) {
pos1 = shared_pos[izTri[inext] - StartMinor];
ndesire1 = shared_n[izTri[inext] - StartMinor].n;
}
else {
if (bUseCircumcenter) {
pos1 = p_cc[izTri[inext]];
}
else {
pos1 = p_info_minor[izTri[inext]].pos;
}
ndesire1 = p_n_minor[izTri[inext]].n;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) pos1 = Clockwise_d*pos1;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) pos1 = Anticlockwise_d*pos1;
high_n = max(ndesire1, high_n);
low_n = min(ndesire1, low_n);
tri_area = fabs(0.5*
((pos0.x + pos1.x) * (pos1.y - pos0.y)
+ (info.pos.x + pos1.x) * (info.pos.y - pos1.y)
+ (info.pos.x + pos0.x) * (pos0.y - info.pos.y)));
if (TEST1) printf("%d : ndesire0 %1.10E ndesire1 %1.10E high_n low_n %1.8E %1.8E tri_area %1.9E\n", VERTCHOSEN,
ndesire0, ndesire1, high_n, low_n, tri_area);
N0 += tri_area*THIRD*(ndesire0 + ndesire1);
coeff[i] += tri_area*THIRD;
coeff[inext] += tri_area*THIRD;
coeffcent += tri_area*THIRD;
AreaMajor += tri_area;
pos0 = pos1;
ndesire0 = ndesire1;
};
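// At this point AreaMajor is the sum of the triangle areas, N0 is the contribution of the corner
// values alone, and the shard-model mass is N = coeffcent*n_cent + sum_i coeff[i]*n[i], with
// coeffcent = THIRD*AreaMajor. The steps below choose n_cent and the n[i] so that N matches
// n_avg*AreaMajor while keeping the values between low_n and high_n where possible.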
// . If n_avg > n_max_corners then set all to n_avg.
// . If n_min < n_needed < n_max then set n_cent = n_needed
// Otherwise, we now have coeff array populated and will go round
// repeatedly. We have to reload n lots of times.
// This is not the typical case.
p_AreaMajor[iVertex] = AreaMajor;
if ((n_avg > high_n) || (n_avg < low_n)) {
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
n_.n[i] = n_avg;
n_.n_cent = n_avg;
if (TEST1) printf("VERTCHOSEN (n_avg > high_n) || (n_avg < low_n) \n");
// if (iVertex == CHOSEN) printf("CHOSEN : Switch1 n_avg %1.12E \n",n_avg);
}
else {
real n_C_need = (n_avg*AreaMajor - N0) / coeffcent;
if ((n_C_need > low_n) && (n_C_need < high_n)) {
n_.n_cent = n_C_need;
if (TEST1) printf("VERTCHOSEN ((n_C_need > low_n) && (n_C_need < high_n)) \n");
// if (iVertex == CHOSEN) printf("CHOSEN : Switch2 n_C_need %1.12E low_n %1.12E high_n %1.12E\n", n_C_need,low_n,high_n);
}
else {
// The laborious case.
// if (iVertex == CHOSEN) printf("Laborious case...\n");
if (TEST1) printf("VERTCHOSEN The laborious case. n_avg %1.10E n_C_need %1.10E low_n %1.10E high_n %1.10E\n",
n_avg, n_C_need, low_n, high_n);
bool fixed[MAXNEIGH];
memset(fixed, 0, sizeof(bool) * MAXNEIGH);
// cannot fit even this alongside the rest we have in L1.
// Can we make szPBC go out of scope by here?
f64 n_C, n_acceptable;
if (n_C_need < low_n) {
// the mass is low. So for those less than some n_acceptable,
// let them attain n_desire, and fix n_C = low_n.
// Then we'll see how high we can go with n_acceptable.
// if (iVertex == CHOSEN) printf("(n_C_need < low_n)\n");
n_C = low_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
// area-THIRD*area = sum of other coeffs, and of course
// coeffcent = THIRD*area
// n_acceptable > N/area since N=area*n_avg > area*low_n.
// We accept values below this 'max average', let that raise the threshold,
// and go round again until no new lower items are found.
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*low_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n;
}
else {
ndesire = p_n_minor[izTri[i]].n;
};
// if (iVertex == CHOSEN) printf("CHOSEN : ndesire %1.14E n_acceptable %1.14E\n", ndesire,n_acceptable);
if (ndesire < n_acceptable) { // yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
// It can happen that eventually ALL are found
// to be < n_acceptable due to FP error.
// On next pass found will be false.
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
// The value to which we have to set the remaining
// n values.
};
// if (iVertex == CHOSEN) printf("---\n");
} while (found != 0);
}
else {
n_C = high_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*high_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n;
}
else {
ndesire = p_n_minor[izTri[i]].n;
};
// if (iVertex == CHOSEN) printf("CHOSEN : ndesire %1.14E n_acceptable %1.14E\n", ndesire, n_acceptable);
if (ndesire > n_acceptable) {
// yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
};
// if (iVertex == CHOSEN) printf("@@@ \n");
} while (found != 0);
};
// Now we should set the remaining values to n_acceptable
// which is less than ndesire[i] in all those cases.
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) n_.n[i] = n_acceptable;
if (TEST1) printf("n[%d]: %1.10E\n", i, n_.n[i]);
};
n_.n_cent = n_C;
if (TEST1) {
for (i = 0; i < info.neigh_len; i++)
{
printf("%1.10E \t\t", n_.n[i]);
}
printf("\nn_cent %1.14E \n\n", n_.n_cent);
};
};
};
memcpy(&(p_n_shards[iVertex]), &n_, sizeof(ShardModel));
if (TEST1) printf("iVertex %d n_cent %1.10E nmajor(=n_avg) %1.10E \n*********\n",
iVertex, n_.n_cent, n_avg);
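// The ion shard model is now stored. By construction its integral over the major cell equals
// n_avg * AreaMajor (up to floating-point error), so major-cell mass is preserved.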
// Now start again: neutrals
n_avg = p_n_major[iVertex].n_n;
if ((izTri[0] >= StartMinor) && (izTri[0] < EndMinor)) {
pos0 = shared_pos[izTri[0] - StartMinor];
ndesire0 = shared_n[izTri[0] - StartMinor].n_n;
}
else {
if (bUseCircumcenter) {
pos0 = p_cc[izTri[0]];
}
else {
pos0 = p_info_minor[izTri[0]].pos;
};
ndesire0 = p_n_minor[izTri[0]].n_n;
}
if (szPBC[0] == ROTATE_ME_CLOCKWISE) pos0 = Clockwise_d*pos0;
if (szPBC[0] == ROTATE_ME_ANTICLOCKWISE) pos0 = Anticlockwise_d*pos0;
N0 = 0.0;
//coeffcent = 0.0;
//memset(coeff, 0, sizeof(f64)*MAXNEIGH_d); // keep em
high_n = ndesire0;
low_n = ndesire0;
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
{
// Temporary setting:
n_.n[i] = ndesire0;
inext = i + 1; if (inext == info.neigh_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor)) {
pos1 = shared_pos[izTri[inext] - StartMinor];
ndesire1 = shared_n[izTri[inext] - StartMinor].n_n;
}
else {
if (bUseCircumcenter) {
pos1 = p_cc[izTri[inext]];
}
else {
pos1 = p_info_minor[izTri[inext]].pos;
}
ndesire1 = p_n_minor[izTri[inext]].n_n;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) pos1 = Clockwise_d*pos1;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) pos1 = Anticlockwise_d*pos1;
high_n = max(ndesire1, high_n);
low_n = min(ndesire1, low_n);
tri_area = fabs(0.5*
((pos0.x + pos1.x) * (pos1.y - pos0.y)
+ (info.pos.x + pos1.x) * (info.pos.y - pos1.y)
+ (info.pos.x + pos0.x) * (pos0.y - info.pos.y)));
N0 += tri_area*THIRD*(ndesire0 + ndesire1); // Could consider moving it into loop above.
pos0 = pos1;
ndesire0 = ndesire1;
};
// . If n_avg > n_max_corners then set all to n_avg.
// . If n_min < n_needed < n_max then set n_cent = n_needed
// Otherwise, we now have coeff array populated and will go round
// repeatedly. We have to reload n lots of times.
// This is not the typical case.
if ((n_avg > high_n) || (n_avg < low_n)) {
#pragma unroll MAXNEIGH
for (i = 0; i < info.neigh_len; i++)
n_.n[i] = n_avg;
n_.n_cent = n_avg;
}
else {
real n_C_need = (n_avg*AreaMajor - N0) / coeffcent;
if ((n_C_need > low_n) && (n_C_need < high_n)) {
n_.n_cent = n_C_need; // accept desired values
}
else {
// The laborious case.
bool fixed[MAXNEIGH];
memset(fixed, 0, sizeof(bool) * MAXNEIGH);
// cannot fit even this alongside the rest we have in L1.
// Can we make szPBC go out of scope by here?
f64 n_C, n_acceptable;
if (n_C_need < low_n) {
// the mass is low. So for those less than some n_acceptable,
// let them attain n_desire, and fix n_C = low_n.
// Then we'll see how high we can go with n_acceptable.
n_C = low_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
// area-THIRD*area = sum of other coeffs, and of course
// coeffcent = THIRD*area
// n_acceptable > N/area since N=area*n_avg > area*low_n.
// We accept values above this threshold, let that lower the threshold,
// and go round again until no new higher items are found.
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*low_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n_n;
}
else {
ndesire = p_n_minor[izTri[i]].n_n;
};
if (ndesire < n_acceptable) { // yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
// It can happen that eventually ALL are found
// to be < n_acceptable due to FP error.
// On next pass found will be false.
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
// The value to which we have to set the remaining
// n values.
};
} while (found != 0);
}
else {
n_C = high_n;
n_acceptable = (n_avg*AreaMajor - coeffcent*n_C) / (AreaMajor - THIRD*AreaMajor);
bool found = 0;
do {
found = 0;
f64 coeffremain = 0.0;
f64 N_attained = coeffcent*high_n;
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) {
// Go collect ndesire[i]:
f64 ndesire;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
ndesire = shared_n[izTri[i] - StartMinor].n_n;
}
else {
ndesire = p_n_minor[izTri[i]].n_n;
};
if (ndesire > n_acceptable) {
// yes, use ndesire[i] ...
fixed[i] = true;
n_.n[i] = ndesire;
N_attained += n_.n[i] * coeff[i];
found = true;
}
else {
coeffremain += coeff[i];
};
}
else {
N_attained += n_.n[i] * coeff[i];
};
};
if ((found != 0) && (coeffremain > 0.0)) {
n_acceptable = (n_avg*AreaMajor - N_attained) / coeffremain;
};
} while (found != 0);
};
// Now we should set the remaining values to n_acceptable
// which is less than ndesire[i] in all those cases.
for (i = 0; i < info.neigh_len; i++)
{
if (fixed[i] == 0) n_.n[i] = n_acceptable;
};
n_.n_cent = n_C;
};
};
memcpy(&(p_n_n_shards[iVertex]), &n_, sizeof(ShardModel));
// Now done both species.
}
else { // NOT DOMAIN_VERTEX
if (info.flag == OUTERMOST) {
n_.n_cent = p_n_major[iVertex].n;
for (i = 0; i < MAXNEIGH; i++)
n_.n[i] = n_.n_cent;
memcpy(&(p_n_shards[iVertex]), &n_, sizeof(ShardModel));
if (iVertex == VERTCHOSEN) printf("%d n_major.n %1.10E n_.n[4] %1.8E n_.n_cent %1.8E\n\n\n",
iVertex, p_n_major[iVertex].n, n_.n[4], n_.n_cent);
n_.n_cent = p_n_major[iVertex].n_n;
for (i = 0; i < MAXNEIGH; i++)
n_.n[i] = n_.n_cent;
memcpy(&(p_n_n_shards[iVertex]), &n_, sizeof(ShardModel));
f64 AreaTotal = PPN_CIRCLE*M_PI*(DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS -
INNER_A_BOUNDARY*INNER_A_BOUNDARY);
p_AreaMajor[iVertex] = AreaTotal / (real)(numTilesMajor*threadsPerTileMajor); // ?
// Setting the area of each outermost cell to the average vertcell area.
// Watch out for this if the number of OUTERMOST vertices is ever reduced.
}
else {
memset(&(p_n_shards[iVertex]), 0, sizeof(ShardModel));
memset(&(p_n_n_shards[iVertex]), 0, sizeof(ShardModel));
p_AreaMajor[iVertex] = 0.0; // NOTE BENE
};
};
// NexT: tri_n_lists.
// Think I am not using this passing mechanism for n_shards information.
/*
for (i = 0; i < cp.numCoords; i++)
{
// for 2 triangles each corner:
// first check which number corner this vertex is
// make sure we enter them in order that goes anticlockwise for the
// Then we need to make izMinorNeigh match this somehow
// Let's say izMinorNeigh goes [across corner 0, across edge 2, corner 1, edge 0, corner 2, edge 1]
// We want 0,1 to be the values corresp corner 0.
// shard value 0 is in tri 0. We look at each pair of shard values in turn to interpolate.
inext = i + 1; if (inext == cp.numCoords) inext = 0;
interpolated_n = THIRD * (n_shards[iVertex].n[i] + n_shards[iVertex].n[inext] + n_shards[iVertex].n_cent);
interpolated_n_n = THIRD * (n_shards_n[iVertex].n[i] + n_shards_n[iVertex].n[inext] + n_shards_n[iVertex].n_cent);
// contribute to tris i and inext:
o1 = (T + izTri[i])->GetCornerIndex(X + iVertex);
o2 = (T + izTri[inext])->GetCornerIndex(X + iVertex);
// Now careful which one's which:
// inext sees this point as more anticlockwise.
Tri_n_lists[izTri[inext]][o2 * 2 + 1] = interpolated_n;
Tri_n_lists[izTri[i]][o1 * 2] = interpolated_n;
Tri_n_n_lists[izTri[inext]][o2 * 2 + 1] = interpolated_n_n;
Tri_n_n_lists[izTri[i]][o1 * 2] = interpolated_n_n;
};*/
}
__global__ void kernelCreateShardModelOfDensities_And_SetMajorAreaDEBUG(
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_major,
nvals * __restrict__ p_n_minor,
long * __restrict__ p_izTri_vert,
char * __restrict__ p_szPBCtri_vert,
f64_vec2 * __restrict__ p_cc,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_n_shards,
// long * __restrict__ Tri_n_lists,
// long * __restrict__ Tri_n_n_lists ,
f64 * __restrict__ p_AreaMajor,
bool bUseCircumcenter
)// sets n_shards_n, n_shards, Tri_n_n_lists, Tri_n_lists
{
// called for major tile
// Interpolation to Tri_n_lists, Tri_n_n_lists is not yet implemented. But this would be output.
// Inputs:
// n, pTri->cent, izTri, pTri->periodic, pVertex->pos
// Outputs:
// pVertex->AreaCell
// n_shards[iVertex]
// Tri_n_n_lists[izTri[i]][o1 * 2] <--- 0 if not set by domain vertex
// CALL AVERAGE OF n TO TRIANGLES - WANT QUADRATIC AVERAGE - BEFORE WE BEGIN
// MUST ALSO POPULATE pVertex->AreaCell with major cell area
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ nvals shared_n[threadsPerTileMinor];
// Here 4 doubles/minor. In 16*1024, 4 double*8 bytes*512 minor. 256 major.
// Choosing to store n_n while doing n which is not necessary.
ShardModel n_; // to be populated
int iNeigh, tri_len;
f64 N_n, N, interpolated_n, interpolated_n_n;
long i, inext, o1, o2;
long const StartMinor = blockIdx.x*threadsPerTileMinor; // vertex index
long const EndMinor = StartMinor + threadsPerTileMinor;
// To fit in Tri_n_n_lists stuff we should first let coeff[] go out of scope.
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[BEGINNING_OF_CENTRAL + iVertex];
if (info.flag == DOMAIN_VERTEX) {
} else { // NOT DOMAIN_VERTEX
if (info.flag == OUTERMOST) {
// f64 AreaTotal = PPN_CIRCLE*M_PI*(DOMAIN_OUTER_RADIUS*DOMAIN_OUTER_RADIUS -
// INNER_A_BOUNDARY*INNER_A_BOUNDARY);
// p_AreaMajor[iVertex] = AreaTotal;
// commented 2
// Setting area of outermost to average vertcell area
// ...
// Watch out for this when we make OUTERMOST FEWER
} else {
memset(&(p_n_shards[iVertex]), 0, sizeof(ShardModel));
// Debug notes:
// This memset alone kills it.
// Checking whether it still dies when the first part of the kernel is scrapped.
// If this really is the killer, try a cudaMemset and see whether that dies too.
// Good - it still dies.
// Step 3: try removing this one:
// memset(&(p_n_n_shards[iVertex]), 0, sizeof(ShardModel));
// Is this what kills it? YES - surprisingly.
// p_AreaMajor[iVertex] = 0.0; // NOTE BENE
// ?
};
// COMMENTED 1
};
}
__global__ void kernelInferMinorDensitiesFromShardModel(
structural * __restrict__ p_info,
nvals * __restrict__ p_n_minor,
ShardModel * __restrict__ p_n_shards,
ShardModel * __restrict__ p_n_shards_n,
LONG3 * __restrict__ p_tri_corner_index,
LONG3 * __restrict__ p_who_am_I_to_corner,
nvals * __restrict__ p_one_over_n
) {
// Assume that we do the simplest thing possible.
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // iMinor OF VERTEX
structural info = p_info[iMinor];
nvals result;
if (iMinor >= BEGINNING_OF_CENTRAL)
{
// if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) printf("\niMinor %d pos %1.10E %1.10E flag %d \n",
// iMinor, info.pos.x, info.pos.y, info.flag);
if (info.flag == DOMAIN_VERTEX) {
result.n = p_n_shards[iMinor - BEGINNING_OF_CENTRAL].n_cent;
result.n_n = p_n_shards_n[iMinor - BEGINNING_OF_CENTRAL].n_cent;
p_n_minor[iMinor] = result;
result.n = 1.0 / result.n;
result.n_n = 1.0 / result.n_n;
p_one_over_n[iMinor] = result;
// We are not being fully consistent here:
// we may wish to use the major n for the minor central n.
// We have not done the shard model for the target n; we just average and then tween this back.
} else {
// Outermost vertex?
result.n = 0.0;
result.n_n = 0.0;
if (info.flag == OUTERMOST) {
result.n_n = 1.0e18;
result.n = UNIFORM_n_d;
};
p_n_minor[iMinor] = result;
result.n_n = 1.0 / result.n_n;
result.n = 1.0 / result.n;
p_one_over_n[iMinor] = result;
}
} else {
if (info.flag == DOMAIN_TRIANGLE) {
LONG3 tri_corner_index = p_tri_corner_index[iMinor];
LONG3 who_am_I_to_corner = p_who_am_I_to_corner[iMinor];
result.n = THIRD*
(p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
result.n_n = THIRD*
(p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
p_n_minor[iMinor] = result;
if (TESTTRI) printf("%d: %d %d %d shards n %1.10E %1.10E %1.10E result %1.10E\n",
CHOSEN, tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1],
p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2],
p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3], result.n);
result.n = THIRD*(
1.0/ p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ 1.0/ p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ 1.0/p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
result.n_n = THIRD*
(1.0/p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1]
+ 1.0/p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2]
+ 1.0/p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3]);
p_one_over_n[iMinor] = result;
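// Note: for the 1/n output we average the corner reciprocals (a harmonic-style estimate),
// which is not the same as the reciprocal of the averaged density stored just above.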
} else {
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
LONG3 tri_corner_index = p_tri_corner_index[iMinor];
LONG3 who_am_I_to_corner = p_who_am_I_to_corner[iMinor];
result.n = 0.0;
result.n_n = 0.0;
structural info1, info2, info3;
info1 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i1];
info2 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i2];
info3 = p_info[BEGINNING_OF_CENTRAL + tri_corner_index.i3];
int numabove = 0;
if (info1.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1];
result.n_n += p_n_shards_n[tri_corner_index.i1].n[who_am_I_to_corner.i1];
};
if (info2.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2];
result.n_n += p_n_shards_n[tri_corner_index.i2].n[who_am_I_to_corner.i2];
};
if (info3.flag == DOMAIN_VERTEX) {
numabove++;
result.n += p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3];
result.n_n += p_n_shards_n[tri_corner_index.i3].n[who_am_I_to_corner.i3];
};
if (TESTTRI) printf("%d: %d %d %d C-INS shards n %1.10E %1.10E %1.10E result %1.10E numabove %d\n",
CHOSEN, tri_corner_index.i1, tri_corner_index.i2, tri_corner_index.i3,
p_n_shards[tri_corner_index.i1].n[who_am_I_to_corner.i1],
p_n_shards[tri_corner_index.i2].n[who_am_I_to_corner.i2],
p_n_shards[tri_corner_index.i3].n[who_am_I_to_corner.i3], result.n,
numabove);
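// numabove counts how many corners are domain vertices; the division below assumes a
// crossing triangle always has at least one such corner, otherwise we would divide by zero.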
result.n /= (f64)numabove;
result.n_n /= (f64)numabove;
p_n_minor[iMinor] = result;
result.n = 1.0 / result.n;
result.n_n = 1.0 / result.n_n;
p_one_over_n[iMinor] = result;
} else {
memset(&(p_n_minor[iMinor]), 0, sizeof(nvals));
}
}
}
}
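// A hedged host-side sketch (not part of the original code) of one plausible way to launch the
// kernel above: one thread per minor cell, using the threadsPerTileMinor block size seen elsewhere
// in this file. The wrapper name, the "numMinor" argument and the assumption that numMinor is an
// exact multiple of the block size are illustrative only.
static void CallInferMinorDensities_sketch(
	long numMinor,
	structural *p_info, nvals *p_n_minor,
	ShardModel *p_n_shards, ShardModel *p_n_shards_n,
	LONG3 *p_tri_corner_index, LONG3 *p_who_am_I_to_corner,
	nvals *p_one_over_n)
{
	// The kernel indexes minors directly from threadIdx/blockIdx and does no bounds check,
	// so the grid must cover exactly numMinor threads.
	int numBlocks = (int)(numMinor / threadsPerTileMinor);
	kernelInferMinorDensitiesFromShardModel<<<numBlocks, threadsPerTileMinor>>>(
		p_info, p_n_minor, p_n_shards, p_n_shards_n,
		p_tri_corner_index, p_who_am_I_to_corner, p_one_over_n);
}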
/*
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_major,
T3 * __restrict__ p_T_k,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely -- do we only get 31 doubles in registers
// regardless of the number of threads and space? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
f64 tempf64[2];
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p_T_major[iVertex].Tn;
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid? IMPORTANT
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// The idea of not sending blocks full of non-domain vertices is another idea. Fiddly with indices.
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
// EXPERIMENT WHETHER IT IS FASTER WITH THESE OUTSIDE OR INSIDE THE BRANCH.
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Tn;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Tn;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Tn;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Tn; // ready for switch around
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Tn;
#endif
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Tn; // Stupid 3-struct
// Also need to update T_opp if it was not done already
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Tn;
};
#endif
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
if (TEST) {
printf("%d contrib %1.8E \n"
"pos_anti %1.9E %1.9E pos_out %1.9E %1.9E pos_clock %1.9E %1.9E\n", iVertex,
0.5*edge_normal.x*THIRD*(pos_anti.x + pos_clock.x
+ info.pos.x + info.pos.x + pos_out.x + pos_out.x),
pos_anti.x, pos_anti.y, pos_out.x, pos_out.y, pos_clock.x, pos_clock.y);
}
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
// How to detect? Loading a load of flags is a killer! We do need to load ... and this is why we should have not made info struct. Def not.
////
//if (insulator triangle)
//{
// centroid1 = THIRD*(pos_anti + pos_out + info.pos);
// // project to radius of insulator
// centroid1.project_to_radius(3.44);
// // Now dot with unit vectors:
// f64_vec2 tempvec2;
// tempvec2.x = unit_vec1.x*centroid1.x + unit_vec1.y*centroid1.y;
// tempvec2.y = unit_vec2.x*centroid1.x + unit_vec2.y*centroid1.y;
// centroid1.x = tempvec2.x;
// centroid1.y = tempvec2.y;
//} else {
// // centroid1 = THIRD*(pos_anti_twist + pos_out_twist);
// centroid1.x = THIRD*(
// unit_vec1.x*(pos_anti.x - info.pos.x) + unit_vec1.y*(pos_anti.y - info.pos.y)
// + unit_vec1.x*(pos_out.x - info.pos.x) + unit_vec1.y*(pos_out.y - info.pos.y)
// );
// centroid1.y = THIRD*(
// - unit_vec1.y*(pos_anti.x - info.pos.x) + unit_vec1.x*(pos_anti.y - info.pos.y)
// - unit_vec1.y*(pos_out.x - info.pos.x) + unit_vec1.x*(pos_out.y - info.pos.y)
// );
//}
//if (insulator triangle)
//{
// centroid2 = THIRD*(pos_clock + pos_out + info.pos);
// // project to radius of insulator
//} else {
//}
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((pos_clock.x*pos_clock.x + pos_clock.y*pos_clock.y <
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
||
(pos_anti.x*pos_anti.x + pos_anti.y*pos_anti.y <
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Ti;
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Ti;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Ti;
#endif
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Ti;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Ti;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Ti;
};
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
if (TEST) printf("%d iNeigh %d kappa_ion %1.8E nu %1.8E |o| %1.8E contrib %1.8E \n",
iVertex, iNeigh, kappa_parallel, nu,
omega.modulus(),
TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega))
);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifndef BWDSIDET
T_clock = T_outk;
T_outk = T_anti;
#else
T_clock = T_out;
T_out = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Te;
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
if (T_clock == 0.0) {
#ifdef BWDSIDET
T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_clock = T_outk;
#endif
};
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Te;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Te;
}
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if (T_anti == 0.0) {
#ifdef BWDSIDET
T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
#else
T_anti = T_outk;
#endif
}; // So we are receiving 0 then doing this. But how come?
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
if (TEST) {
printf("%d : %d endpt_anti %1.9E %1.9E SHARED endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
if (TEST) {
printf("%d : %d endpt_anti %1.9E %1.9E GLOBAL endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
// It decided to rotate something it shouldn't oughta. Rotated tri 23600 = tri 2 for 11582.
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (pos_out.x*pos_out.x + pos_out.y*pos_out.y >
DEVICE_RADIUS_INSULATOR_OUTER*DEVICE_RADIUS_INSULATOR_OUTER)
{
// f64 grad_out = (T_out - shared_T[threadIdx.x]) / delta_0out;
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
if (TEST) printf("izTri %d kappa_par %1.9E \n",
izTri[iNeigh], p_kappa_e[izTri[iNeigh]]);
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
// ourrates.NeTe += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// Expensive debug: remove!
if (TESTHEAT2) printf(
"iVertex %d iNeigh %d %d contribNeTe %1.9E edge_normal %1.8E %1.8E \n"
"T %1.9E Tout %1.9E T_anti %1.9E T_clock %1.9E\n"
" kappa_par %1.9E nu %1.9E |omega| %1.9E Area %1.9E\n"
"our_n %1.9E our n_n %1.9E nearby n %1.9E %1.9E\n"
"pos %1.8E %1.8E opp %1.8E %1.8E anti %1.8E %1.8E clock %1.8E %1.8E\n"
"omega %1.8E %1.8E grad_T %1.9E %1.9E \n"
"=================================================\n",
iVertex, iNeigh, indexneigh,
TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega)),
edge_normal.x, edge_normal.y, shared_T[threadIdx.x], T_out, T_anti, T_clock,
kappa_parallel, nu, sqrt(omega.dot(omega)),
p_AreaMajor[iVertex],
p_n_major[iVertex].n, p_n_major[iVertex].n_n, p_n_major[indexneigh].n, p_n_major[indexneigh].n_n,
info.pos.x, info.pos.y, pos_out.x, pos_out.y, pos_anti.x, pos_anti.y, pos_clock.x, pos_clock.y,
omega.x, omega.y, grad_T.x, grad_T.y);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}*/
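// A hedged sketch (added for clarity, not referenced by the kernels below) of the longitudinal
// heat-flux contribution those kernels accumulate per edge:
//   d(NT)/dt += (2/3) * kappa_par * (T_out - T_self)
//               * (nu^2 |edge|^2 + (omega_xy . edge_normal)^2) / (delta_out |edge| (nu^2 + |omega|^2)),
// i.e. the full anisotropic tensor reduced to its component along the line to the neighbour.
// The function name is illustrative; it relies only on f64 / f64_vec2 / f64_vec3 operations
// already used elsewhere in this file.
__device__ __forceinline__ f64 LongitudinalFluxContrib_sketch(
	f64 kappa_parallel, f64 nu, f64_vec3 omega,
	f64_vec2 edge_normal, f64 delta_out,
	f64 T_out, f64 T_self)
{
	f64 edgelen = edge_normal.modulus();
	return TWOTHIRDS * kappa_parallel * (T_out - T_self) *
		(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
		/ (delta_out*edgelen*(nu*nu + omega.dot(omega)));
}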
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly_scalarT(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_T_n, f64 * __restrict__ p_T_i, f64 * __restrict__ p_T_e,
// T3 * __restrict__ p_T_k,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool * __restrict__ p_maskbool3,
bool * __restrict__ p_maskblock,
bool bUseMask)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely -- do we only get 31 doubles in registers
// regardless of the number of threads and space? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
if (bUseMask)
if (p_maskblock[blockIdx.x] == 0) return;
if (bUseMask) {
bMask[0] = p_maskbool3[iVertex];
bMask[1] = p_maskbool3[iVertex + NUMVERTICES];
bMask[2] = p_maskbool3[iVertex + NUMVERTICES*2];
//memcpy(bMask, p_maskbool3 + iVertex * 3, 3 * sizeof(bool));
}
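// bMask[0], bMask[1], bMask[2] correspond to the neutral, ion and electron species respectively,
// matching how they are consumed below (bMask[0] for the neutral pass, bMask[iSpecies] after that).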
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
f64 tempf64[2];
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p_T_n[iVertex];
} else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
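// The per-neighbour loop below walks the neighbours anticlockwise, maintaining a rolling
// (clock, out, anti) triple of neighbour positions/temperatures and a (clock, anti) pair of
// triangle endpoints, so each new neighbour only requires loading the "anti" entries.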
if ( (info.flag == DOMAIN_VERTEX) && (
((bUseMask == 0) || (bMask[0] == true) || (bMask[1] == true) || (bMask[2] == true))))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
};
if ((bUseMask == 0) || (bMask[0] == true) )
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// Not sending blocks full of non-domain vertices is another possible optimization; fiddly with indices.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
T_clock = p_T_n[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
T_out = p_T_n[indexneigh]; // saved nothing here, only in loading
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
T_anti = p_T_n[indexneigh];
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//}; // So we are receiving 0 then doing this. But how come?
//Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
if (0) {
printf("%d contrib %1.8E \n"
"pos_anti %1.9E %1.9E pos_out %1.9E %1.9E pos_clock %1.9E %1.9E\n",
iVertex,
0.5*edge_normal.x*THIRD*(pos_anti.x + pos_clock.x
+ info.pos.x + info.pos.x + pos_out.x + pos_out.x),
pos_anti.x, pos_anti.y, pos_out.x, pos_out.y, pos_clock.x, pos_clock.y);
}
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
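// Two estimates of the neutral flux: if either adjacent neighbour sits outside the domain
// (so the quadrilateral would reach into the insulator), fall back to the purely longitudinal
// (T_out - T_self)/distance form; otherwise use grad T over the full quadrilateral.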
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
// This is correct, grad T in same coordinates as edge_normal...
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // mask
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
#pragma unroll
for (int iSpecies = 1; iSpecies < 3; iSpecies++)
{
if (iSpecies == 1)
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_i[iVertex];
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
}
else {
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_e[iVertex];
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
};
// Maybe this alone means combining the ion & electron code was stupid. Maybe it cannot make contiguous accesses.
__syncthreads();
if ((bUseMask == 0) || (bMask[iSpecies] == true)) // either there is no masking, or this is switched on
{
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
if (iSpecies == 1) {
T_clock = p_T_i[indexneigh];
}
else {
T_clock = p_T_e[indexneigh];
};
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
if (iSpecies == 1) {
T_out = p_T_i[indexneigh];
}
else {
T_out = p_T_e[indexneigh];
};
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
if (iSpecies == 1)
{
T_anti = p_T_i[indexneigh];
}
else {
T_anti = p_T_e[indexneigh];
};
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
// if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
// }; // So we are receiving 0 then doing this. But how come?
// BUG -- masked stuff will go wrong.
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
//f64 Area_quadrilateral = 0.5*(
// (info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
// + (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
// + (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
// + (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
// );
//grad_T.x = 0.5*(
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
};
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
// We don't need test for T == 0 because we don't use anti or clock
// and we ruled out looking into insulator.
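// The expression below is the tensor form (commented out above) collapsed onto the line to the
// neighbour: the unmagnetized longitudinal flux (2/3) kappa |edge| (T_out - T_self)/delta_out,
// reduced by the factor (nu^2 + (omega_xy . edge_hat)^2) / (nu^2 + |omega|^2).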
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
//if (iVertex == VERTCHOSEN) {
// printf("iVertex %d T_out T_our %1.10E %1.10E contrib %1.10E\n",
// iVertex, T_out, shared_T[threadIdx.x],
// TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
// (nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
// / (delta_out*edgelen *(nu * nu + omega.dot(omega)))
// );
//}
} else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
};
if ((TESTHEAT1) && (iSpecies == 2))
printf("%d iNeigh %d %d e factor %1.12E contrib %1.12E T_out %1.12E T_self %1.12E\n",
iVertex, iNeigh, indexneigh,
TWOTHIRDS * kappa_parallel *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
(T_out - shared_T[threadIdx.x])*
TWOTHIRDS * kappa_parallel *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
T_out, shared_T[threadIdx.x]
);
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // debug
}; // mask
__syncthreads();
};
if ((TESTHEAT1)) printf("%d ourrates.NeTe %1.10E \n", iVertex, ourrates.NeTe);
// Write back only when ourrates was actually loaded above (DOMAIN_VERTEX and not fully masked);
// otherwise we would overwrite NTadditionrates with an uninitialized local struct.
if ((info.flag == DOMAIN_VERTEX) &&
((bUseMask == 0) || (bMask[0] == true) || (bMask[1] == true) || (bMask[2] == true)))
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
// It was not necessarily sensible to combine ion and electron
// However, it is quite daft having a separate routine for vector2 grad T (??)
}
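// A hedged sketch (added for clarity, not referenced elsewhere): the quadrilateral Green-Gauss
// gradient estimate used for neutral conduction in the kernel above, written as a standalone
// device function. The name is illustrative; the vertex ordering (ours, clock, out, anti)
// follows the kernel's naming.
__device__ __forceinline__ f64_vec2 QuadGradT_sketch(
	f64_vec2 pos_ours, f64_vec2 pos_clock, f64_vec2 pos_out, f64_vec2 pos_anti,
	f64 T_ours, f64 T_clock, f64 T_out, f64 T_anti)
{
	f64 Area_quadrilateral = 0.5*(
		(pos_ours.x + pos_anti.x)*(pos_ours.y - pos_anti.y)
		+ (pos_clock.x + pos_ours.x)*(pos_clock.y - pos_ours.y)
		+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
		+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y));
	f64_vec2 grad_T;
	grad_T.x = 0.5*(
		(T_ours + T_anti)*(pos_ours.y - pos_anti.y)
		+ (T_clock + T_ours)*(pos_clock.y - pos_ours.y)
		+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
		+ (T_anti + T_out)*(pos_anti.y - pos_out.y)) / Area_quadrilateral;
	grad_T.y = -0.5*(
		(T_ours + T_anti)*(pos_ours.x - pos_anti.x)
		+ (T_clock + T_ours)*(pos_clock.x - pos_ours.x)
		+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
		+ (T_anti + T_out)*(pos_anti.x - pos_out.x)) / Area_quadrilateral;
	return grad_T;
}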
__global__ void kernelAccumulateDiffusiveHeatRate_new_Longitudinalonly_1species(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p__T,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p__kappa,
f64 * __restrict__ p__nu,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool * __restrict__ p_maskbool,
bool * __restrict__ p_maskblock,
bool bUseMask,
int iSpecies)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever]; // but as far as we know, we are having to use circumcenters.
// Maybe it works without them now that we have the longitudinal assumptions --- don't know for sure.
// But it means we are not being consistent with our definition of a cell?
// Like having major cells Voronoi => velocity living on centroids (which it must, for visc + A) is in slightly the wrong place.
__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only get 31 doubles' worth of registers per thread
// regardless of # of threads and space used? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask;
if (bUseMask)
if (p_maskblock[blockIdx.x] == 0) return;
if (bUseMask) bMask = p_maskbool[iVertex];
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
}
#endif
memcpy(&(shared_nu[threadIdx.x * 2]), p__nu + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_kappa[threadIdx.x * 2]), p__kappa + 2 * iVertex, 2 * sizeof(f64));
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
shared_T[threadIdx.x] = p__T[iVertex];
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((info.flag == DOMAIN_VERTEX) && ((bUseMask == 0) || (bMask == true)))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
// Not sending blocks full of non-domain vertices is another possible optimization; fiddly with the indices though.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
T_clock = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
T_clock = p__T[indexneigh];
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
T_out = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
T_out = p__T[indexneigh]; // saved nothing here, only in loading
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
// if (T_clock == 0.0) {
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
T_anti = shared_T[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
T_anti = p__T[indexneigh];
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//if (T_anti == 0.0) {
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//}; // So we are receiving 0 then doing this. But how come?
//Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
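// Note: the nu declared on the next line shadows the kernel-scope nu above; every use in this block refers to the local.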
f64 nu;
if (iSpecies == 0) {
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p__kappa[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p__kappa[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
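// Area_quadrilateral is the shoelace (contour) area of the quadrilateral with corners at our
// vertex, pos_clock, pos_out and pos_anti. The grad_T below is the matching discrete
// Green's-theorem estimate, grad T ~ (1/A) * closed-contour integral of T n_hat dl, using a
// trapezoidal average of T along each of the four edges.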
// When we come to do the other iSpecies, make a subroutine.
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
};
// This is correct, grad T in same coordinates as edge_normal...
}
else {
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p__kappa[izTri[iNeigh]];
nu = 0.5*p__nu[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p__kappa[izTri[iPrev]];
nu += 0.5*p__nu[izTri[iPrev]];
};
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
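// omega is presumably the cyclotron-frequency vector q B / (m_s c): the in-plane part averages
// our B with the neighbour's, and Bz is taken as the constant BZ_CONSTANT (qoverMc for ions,
// qovermc for electrons).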
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
}
else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
if (TESTHEAT)
printf("%d %d iSpecies %d contrib %1.10E kappa_par %1.9E\nT_out %1.9E T %1.9E nu %1.9E omega %1.9E %1.9E\n", iVertex, iNeigh, iSpecies,
TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega))),
kappa_parallel, T_out, shared_T[threadIdx.x], nu, omega.x, omega.y
);
};
}
}; // if iSpecies == 0
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
T_clock = T_out;
T_out = T_anti;
}; // next iNeigh
}; // DOMAIN vertex active in mask
// Turned out to be stupid having a struct called NTrates. We just want to modify one scalar at a time.
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
}
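// Illustrative only - a minimal sketch that is not referenced by any kernel in this file. It simply
// restates the scalar factor multiplying (T_out - T_self) in the longitudinal magnetized flux terms
// above, under the same assumptions as the inline code (kappa and nu already averaged over the two
// adjacent triangles, omega = q B / (m c), edge_normal scaled by edge length).
__device__ f64 Illustrative_LongitudinalFluxFactor(
	f64 kappa_parallel,      // averaged parallel conductivity
	f64 nu,                  // averaged collision frequency
	f64_vec3 omega,          // cyclotron-frequency vector
	f64_vec2 edge_normal,    // edge normal scaled by edge length
	f64 delta_out)           // distance from our vertex to the neighbour vertex
{
	f64 edgelen = edge_normal.modulus();
	return TWOTHIRDS * kappa_parallel *
		(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
		/ (delta_out*edgelen*(nu*nu + omega.dot(omega)));
}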
#include "heatflux.cu"
__global__ void kernelCalc_SelfCoefficient_for_HeatConduction
(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p_coeffself_n,
f64 * __restrict__ p_coeffself_i,
f64 * __restrict__ p_coeffself_e // outputs
)
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
// DO NOT WANT:
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only get 31 doubles' worth of registers per thread
// regardless of # of threads and space used? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
}
__syncthreads();
f64_vec2 grad_T;
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
memset(&ourrates, 0, sizeof(NTrates));
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
// Not sending blocks full of non-domain vertices is another possible optimization; fiddly with the indices though.
// Need this, we are adding on to existing d/dt N,NT :
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
// EXPERIMENT WHETHER IT IS FASTER WITH THESE OUTSIDE OR INSIDE THE BRANCH.
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(-1.0) / (pos_out - info.pos).modulus();
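// The (-1.0) stands for d(T_out - T_self)/dT_self: this kernel accumulates the derivative of the
// conduction rate with respect to our own T, not the rate itself.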
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
//grad_T.x = 0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
// + (T_out + T_clock)*(pos_out.y - pos_clock.y)
// + (T_anti + T_out)*(pos_anti.y - pos_out.y)
// ) / Area_quadrilateral;
//grad_T.y = -0.5*( // notice minus
// (shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
// + (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
// + (T_out + T_clock)*(pos_out.x - pos_clock.x)
// + (T_anti + T_out)*(pos_anti.x - pos_out.x)
// ) / Area_quadrilateral;
grad_T.x = 0.5*(pos_clock.y - pos_anti.y) / Area_quadrilateral;
grad_T.y = -0.5*(pos_clock.x - pos_anti.x) / Area_quadrilateral;
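// These are d(grad_T)/dT_self: only the T_self terms of the contour formula survive, which is why
// just (pos_clock - pos_anti) appears.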
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
}
// This is correct, grad T in same coordinates as edge_normal...
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
#pragma unroll
for (int iSpecies = 1; iSpecies < 3; iSpecies++)
{
if (iSpecies == 1)
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
else {
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
};
// Maybe this alone means combining the ion & electron code was stupid. Maybe it can't make contiguous accesses.
__syncthreads();
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
if (iSpecies == 1) {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
};
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
{ // scoping brace
f64_vec3 omega;
if (iSpecies == 1) {
omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
}
else {
omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
};
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
//
// ourrates.NiTi += TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega));
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
if (iSpecies == 1) {
ourrates.NiTi += TWOTHIRDS * kappa_parallel * (-1.0) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
} else {
ourrates.NeTe += TWOTHIRDS * kappa_parallel * (-1.0) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen *(nu * nu + omega.dot(omega)));
};
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
__syncthreads();
};
// Now compute self coeff from d/dself dNT/dt
nvals n_use = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
f64 Nn = n_use.n_n*AreaMajor;
f64 N = n_use.n *AreaMajor;
p_coeffself_n[iVertex] = sqrt(Nn) - (h_use / sqrt(Nn))*ourrates.NnTn; // ourrates here holds d(rate)/dT_self, so this whole expression is d(epsilon)/dT_self
p_coeffself_i[iVertex] = sqrt(N) - (h_use / sqrt(N))*ourrates.NiTi; // likewise for ions
p_coeffself_e[iVertex] = sqrt(N) - (h_use / sqrt(N))*ourrates.NeTe; // likewise for electrons
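// Derivation (restating kernelCreateEpsilonHeat): the residual there is
//   epsilon = sqrt(N) T - sqrt(N) T_k - (h/sqrt(N)) * rates(T),
// so d(epsilon)/dT_self = sqrt(N) - (h/sqrt(N)) * d(rates)/dT_self, which is what is stored above.
// Note that ourrates here contains only the conduction part of d(rates)/dT_self, accumulated via
// the (-1.0) factors earlier in this kernel.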
// NOTA BENE: WE UPDATED THIS IN VIEW OF:
// epsilon *= sqrt(N);
if ((iVertex == VERTCHOSEN) || (iVertex == VERTCHOSEN2))
printf("%d coeffself (1-h/N rates) %1.10E Rates %1.10E h/N %1.10E\n\n",
iVertex, p_coeffself_n[iVertex], ourrates.NnTn, h_use / N);
//if (iVertex == VERTCHOSEN) printf("iVertex %d coeffself_i %1.10E \n", iVertex, p_coeffself_i[iVertex]);
// ourrates is negative so this is > 1.
}
__global__ void kernelPowerminushalf
(f64 * __restrict__ p_input, f64 * __restrict__ p_output)
{
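// Elementwise x -> 1/sqrt(x). Presumably used to build the D^{-1/2} equilibration factors
// (p_invsqrtD_*) consumed by kernelCreateEpsilonHeat_Equilibrated; that link is inferred from the
// naming, not stated in this file.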
long const index = threadIdx.x + blockIdx.x*blockDim.x;
p_output[index] = 1.0 / sqrt(p_input[index]);
}
__global__ void kernelVolleyRegressors(
f64 * __restrict__ p_regress,
long const Length,
char * __restrict__ p_iVolley
) {
long const iVertex = threadIdx.x + blockDim.x*blockIdx.x;
// p_regress is regr+NUMVERTICES. That is the position of epsilon.
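// Layout produced below: slots [0] and [Length] keep the two input regressors masked to iVolley == 0;
// [2*Length],[3*Length] hold the same pair masked to iVolley == 1; [4*Length],[5*Length] masked to
// iVolley == 2. The iVolley > 2 pair (regr7, regr8) is computed but deliberately not stored, leaving
// slots 6 and 7 free.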
char cVolley = p_iVolley[iVertex];
f64 regr1 = p_regress[iVertex];
f64 regr2 = p_regress[iVertex + Length];
f64 regr3 = regr1*((cVolley == 1)? 1 : 0);
f64 regr4 = regr2*((cVolley == 1) ? 1 : 0);
f64 regr5 = regr1*((cVolley == 2) ? 1 : 0);
f64 regr6 = regr2*((cVolley == 2)? 1 : 0);
f64 regr7 = regr1*((cVolley > 2)? 1 : 0);
f64 regr8 = regr2*((cVolley > 2) ? 1 : 0);
regr1 = regr1*((cVolley == 0) ? 1 : 0);
regr2 = regr2*((cVolley == 0) ? 1 : 0);
p_regress[iVertex] = regr1;
p_regress[iVertex + Length] = regr2;
p_regress[iVertex + 2 * Length] = regr3;
p_regress[iVertex + 3 * Length] = regr4;
p_regress[iVertex + 4 * Length] = regr5;
p_regress[iVertex + 5 * Length] = regr6;
// No thanks: leave 7 free
//p_regress[iVertex + 6 * Length] = regr7;
//p_regress[iVertex + 7 * Length] = regr8;
}
__global__ void kernelCreateEpsilonHeat
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_NT_n,
f64 * __restrict__ p_NT_i,
f64 * __restrict__ p_NT_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
NTrates * __restrict__ NTadditionrates, // it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ p_b_Failed,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
bool bMask[3];
if (bUseMask) {
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3); // until we break out species!
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2*NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
T3 T_k = p_T_k[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
f64 epsilon_n, epsilon_i, epsilon_e;
bool bFail = false;
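// Residual driven to zero here (restating the three branches below): for each species s,
//   epsilon_s = sqrt(N_s) T_s - sqrt(N_s) T_s^k - (hsub / sqrt(N_s)) * d(N_s T_s)/dt,
// i.e. the backward-Euler equation N (T - T_k) = h * dNT/dt divided through by sqrt(N),
// with p_NT_* storing sqrt(N) T as the solver variable.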
if ((bUseMask == 0) || bMask[0]) { // test bUseMask first: bMask is only initialized when masking is on
f64 sqrtNn = sqrt(AreaMajor*n.n_n);
f64 NnTn = p_NT_n[iVertex]; // means sqrtN T
epsilon_n = NnTn - T_k.Tn*sqrtNn - (hsub / sqrtNn)*ourrates.NnTn;
if (epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NnTn*NnTn + 1.0e-10*1.0e-10)) bFail = true;
// Note that ourrates already included the factor 1/sqrtN on our own sqrt(N)T
} else {
epsilon_n = 0.0;
};
if ((bUseMask == 0) || bMask[1]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 NTi = p_NT_i[iVertex];
epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
if (epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NTi*NTi + 1.0e-10*1.0e-10)) bFail = true;
if (iVertex == VERTCHOSEN) printf("%d NTi %1.10E sqrtN Tk %1.10E hsub / sqrtN %1.10E NiTi %1.10E eps_i %1.10E sqrtN %1.10E\n",
iVertex, NTi, T_k.Ti*sqrtN, hsub / sqrtN, ourrates.NiTi, epsilon_i, sqrtN);
} else {
epsilon_i = 0.0;
};
if ((bUseMask == 0) || bMask[2]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 NTe = p_NT_e[iVertex]; // is this sqrtN T ?
epsilon_e = NTe - T_k.Te*sqrtN - (hsub / sqrtN)*ourrates.NeTe;
if (epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(NTe*NTe + 1.0e-10*1.0e-10)) bFail = true;
} else {
epsilon_e = 0.0;
};
if (epsilon_n != epsilon_n) printf("epsilon_n NaN iVertex %d n_n %1.10E Area %1.10E \n",
iVertex, n.n_n, AreaMajor);
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if (p_b_Failed != 0) {
if (bFail)
p_b_Failed[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
};
} else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
};
}
__global__ void kernelSelectivelyZeroNTrates(
NTrates * __restrict__ NTadditionrates,
bool * __restrict__ p_bMask3
) {
long const iVertex = threadIdx.x + blockIdx.x*blockDim.x;
bool bMask[3];
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2 * NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
NTrates dNTbydt;
memcpy(&dNTbydt, &(NTadditionrates[iVertex]), sizeof(NTrates));
if (bMask[0]) dNTbydt.NnTn = 0.0;
if (bMask[1]) dNTbydt.NiTi = 0.0;
if (bMask[2]) dNTbydt.NeTe = 0.0;
memcpy(&(NTadditionrates[iVertex]), &dNTbydt, sizeof(NTrates));
}
__global__ void kernelCreateEpsilonHeat_Equilibrated
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_sqrtDNT_n,
f64 * __restrict__ p_sqrtDNT_i,
f64 * __restrict__ p_sqrtDNT_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_invsqrtD_n,
f64 * __restrict__ p_invsqrtD_i,
f64 * __restrict__ p_invsqrtD_e,
NTrates * __restrict__ NTadditionrates, // it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ p_b_Failed,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
bool bMask[3];
if (bUseMask) {
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2*NUMVERTICES];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
T3 T_k = p_T_k[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
f64 epsilon_n, epsilon_i, epsilon_e;
bool bFail = false;
if ((bUseMask == 0) || bMask[0]) { // test bUseMask first: bMask is only initialized when masking is on
f64 sqrtNn = sqrt(AreaMajor*n.n_n);
f64 sqrtDN_T = p_sqrtDNT_n[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_n[iVertex];
// epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
// Multiply epsilon by D^-1/2 and
// wherever a sqrt(DN)T appears multiply it by D_j^-1/2 to give sqrt(N)T
// The multiplication D_j^-1/2 was already included in T -> ourrates
epsilon_n = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtNn)*ourrates.NnTn
- sqrtDinv*T_k.Tn*sqrtNn;
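// i.e. epsilon_n = D^{-1/2} * [ sqrt(Nn) Tn - sqrt(Nn) Tn_k - (hsub/sqrt(Nn)) * rates ],
// the plain residual of kernelCreateEpsilonHeat scaled by D^{-1/2}. Dividing by sqrtDinv again
// (test_epsilon below) recovers the unscaled residual for the convergence test.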
f64 test_epsilon = epsilon_n / sqrtDinv; // divides take long.
f64 sqrtNn_Tn = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtNn_Tn*sqrtNn_Tn + 1.0e-10*1.0e-10)) bFail = true;
// Let's be careful about that threshold. It's for N T^2.
// sqrt(N) that we care about ~ 1e4. T that we care about ~ 1e-14. We then go REL_THRESH*that.
// Note that ourrates already included the factor 1/sqrtN on our own sqrt(N)T
} else {
epsilon_n = 0.0;
};
if ((bUseMask == 0) || bMask[1]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 sqrtDN_T = p_sqrtDNT_i[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_i[iVertex];
epsilon_i = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtN)*ourrates.NiTi
- sqrtDinv*T_k.Ti*sqrtN;
//epsilon_i = NTi - T_k.Ti*sqrtN - (hsub / sqrtN)*ourrates.NiTi;
f64 test_epsilon = epsilon_i / sqrtDinv;
f64 sqrtN_Ti = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtN_Ti*sqrtN_Ti + 1.0e-10*1.0e-10)) bFail = true;
}
else {
epsilon_i = 0.0;
};
if ((bUseMask == 0) || bMask[2]) {
f64 sqrtN = sqrt(AreaMajor*n.n);
f64 sqrtDN_T = p_sqrtDNT_e[iVertex]; // means sqrtDN T
f64 sqrtDinv = p_invsqrtD_e[iVertex];
epsilon_e = sqrtDinv*sqrtDinv*sqrtDN_T
- sqrtDinv*(hsub / sqrtN)*ourrates.NeTe
- sqrtDinv*T_k.Te*sqrtN;
// epsilon_e = NTe - T_k.Te*sqrtN - (hsub / sqrtN)*ourrates.NeTe;
f64 test_epsilon = epsilon_e / sqrtDinv;
f64 sqrtN_Te = sqrtDinv*sqrtDN_T;
if (test_epsilon*test_epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(sqrtN_Te*sqrtN_Te + 1.0e-10*1.0e-10)) bFail = true;
}
else {
epsilon_e = 0.0;
};
// if (TEST) printf("%d epsilon_e %1.8E NTe %1.8E nete %1.8E\n",
// iVertex, epsilon_e, NTe, ourrates.NeTe);
if (epsilon_n != epsilon_n) printf("epsilon_n NaN iVertex %d n_n %1.10E Area %1.10E \n",
iVertex, n.n_n, AreaMajor);
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if (p_b_Failed != 0) {
if (bFail)
p_b_Failed[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
};
}
else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
};
}
__global__ void kernelCreateEpsilonHeatOriginalScaling
(
f64 const hsub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * __restrict__ p_T_k,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major,
NTrates * __restrict__ NTadditionrates ,// it's especially silly having a whole struct of 5 instead of 3 here.
bool * __restrict__ bTest
)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info_major[iVertex];
if (info.flag == DOMAIN_VERTEX) {
f64 Tn = p_T_n[iVertex];
f64 Ti = p_T_i[iVertex];
f64 Te = p_T_e[iVertex];
T3 T_k = p_T_k[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
nvals n = p_n_major[iVertex];
NTrates ourrates;
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
f64 Nn = (AreaMajor*n.n_n);
f64 epsilon_n = Tn - T_k.Tn - (hsub / Nn)*ourrates.NnTn;
f64 N = (AreaMajor*n.n);
f64 epsilon_i = Ti - T_k.Ti - (hsub / N)*ourrates.NiTi;
f64 epsilon_e = Te - T_k.Te - (hsub / N)*ourrates.NeTe;
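// Original scaling: epsilon is the backward-Euler residual in plain temperature units,
// T - T_k - (h/N) * dNT/dt, without the sqrt(N) weighting used in kernelCreateEpsilonHeat.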
p_eps_n[iVertex] = epsilon_n;
p_eps_i[iVertex] = epsilon_i;
p_eps_e[iVertex] = epsilon_e;
if ((epsilon_n*epsilon_n > 1.0e-24*(Tn*Tn + 1.0e-14*1.0e-14))
|| (epsilon_i*epsilon_i > 1.0e-24*(Ti*Ti + 1.0e-14*1.0e-14))
|| (epsilon_e*epsilon_e > 1.0e-24*(Te*Te + 1.0e-14*1.0e-14))
)
bTest[blockIdx.x] = true;
}
else {
p_eps_n[iVertex] = 0.0;
p_eps_i[iVertex] = 0.0;
p_eps_e[iVertex] = 0.0;
}
}
__global__ void kernelAccumulateDiffusiveHeatRate_new_Full(
structural * __restrict__ p_info_minor,
long * __restrict__ pIndexNeigh,
char * __restrict__ pPBCNeigh,
long * __restrict__ izTri_verts,
char * __restrict__ szPBCtri_verts,
f64_vec2 * __restrict__ p_cc,
nvals * __restrict__ p_n_major,
T3 * __restrict__ p_T_major,
//T3 * __restrict__ p_T_putative,
bool * __restrict__ p_bool_longi,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_kappa_n,
f64 * __restrict__ p_kappa_i,
f64 * __restrict__ p_kappa_e,
f64 * __restrict__ p_nu_i,
f64 * __restrict__ p_nu_e,
NTrates * __restrict__ NTadditionrates,
f64 * __restrict__ p_AreaMajor,
bool bCheckWhetherToDoctorUp,
bool * __restrict__ p_maskbool3,
bool * __restrict__ p_maskblock,
bool bUseMask
//T3 * __restrict__ p_T_putative
) // test whether we are pushing heat uphill...
{
// Think we might as well take kappa_par and nu from triangles really.
// If n is modelled well then hopefully a nearby high-n does not have a big impact.
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajorClever]; // 2
__shared__ f64_vec2 shared_pos[2 * threadsPerTileMajorClever];
__shared__ f64 shared_T[threadsPerTileMajorClever]; // +3
//__shared__ f64 shared_T[threadsPerTileMajorClever];
__shared__ f64_vec2 shared_B[threadsPerTileMajorClever]; // +2
// B is smooth. Unfortunately we have not fitted in Bz here.
// In order to do that perhaps rewrite so that variables are overwritten in shared.
// We do not need all T and nu in shared at the same time.
// This way is easier for NOW.
__shared__ f64 shared_kappa[threadsPerTileMajorClever * 2];
__shared__ f64 shared_nu[threadsPerTileMajorClever * 2];
__shared__ long Indexneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // assume 48 bytes = 4*12 = 6 doubles
__shared__ char PBCneigh[MAXNEIGH_d*threadsPerTileMajorClever]; // 12 bytes each from L1. Have 42 per thread at 384 threads.
// We should at any rate try a major block of size 256. If 1 block will run at a time, so be it.
// Leave L1 in case of register overflow into it. <-- don't know how likely - do we only get 31 doubles' worth of registers per thread
// regardless of # of threads and space used? Or can it be 63?
__shared__ char PBCtri[MAXNEIGH_d*threadsPerTileMajorClever];
// Balance of shared vs L1: 24*256*8 = 48K. That leaves 8 doublesworth in L1 for variables.
long izTri[MAXNEIGH_d]; // so only 2 doubles left in L1. 31 in registers??
// Set threadsPerTileMajorClever to 256.
// It would help matters if we get rid of T3. We might as well therefore change to scalar flatpack T.
// We are hoping that it works well loading kappa(tri) and that this is not upset by nearby values. Obviously a bit of an experiment.
// Does make it somewhat laughable that we go to such efforts to reduce global accesses when we end up overflowing anyway.
// If we can fit 24 doubles/thread in 48K that means we can fit 8 doubles/thread in 16K so that's most of L1 used up.
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
long const StartMinor = blockIdx.x*blockDim.x * 2;
long const EndMinor = StartMinor + blockDim.x * 2;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
if (bUseMask)
if (p_maskblock[blockIdx.x] == 0) return;
if (bUseMask) {
//memcpy(bMask, p_maskbool3 + iVertex * 3, 3 * sizeof(bool));
bMask[0] = p_maskbool3[iVertex];
bMask[1] = p_maskbool3[iVertex + NUMVERTICES];
bMask[2] = p_maskbool3[iVertex + NUMVERTICES*2];
}
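// bMask[0..2] are the per-species masks (neutral, ion, electron), stored species-major at offsets
// 0, NUMVERTICES and 2*NUMVERTICES; they gate the per-species sections of this kernel below.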
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
shared_pos_verts[threadIdx.x] = info.pos;
#ifdef CENTROID_HEATCONDUCTION
{
structural infotemp[2];
memcpy(infotemp, p_info_minor + 2 * iVertex, 2 * sizeof(structural));
shared_pos[threadIdx.x * 2] = infotemp[0].pos;
shared_pos[threadIdx.x * 2 + 1] = infotemp[1].pos;
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
// No nu to set for neutrals - not used
}
#else
{
memcpy(&(shared_pos[threadIdx.x * 2]), p_cc + 2 * iVertex, 2 * sizeof(f64_vec2));
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_n + 2 * iVertex, 2 * sizeof(f64));
}
#endif
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_B[threadIdx.x] = p_B_major[iVertex].xypart();
if (TESTHEATFULL) printf("iVertex %d : B_major[iVertex] %1.10E %1.10E \n^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&\n",
iVertex, p_B_major[iVertex].x, p_B_major[iVertex].y);
shared_T[threadIdx.x] = p_T_major[iVertex].Tn;
}
else {
// SHOULD NOT BE LOOKING INTO INS.
// How do we avoid?
memset(&(shared_B[threadIdx.x]), 0, sizeof(f64_vec2));
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
f64_vec2 grad_T;
f64 T_anti, T_clock, T_out, T_outk; // 5
f64_vec2 pos_clock, pos_anti, pos_out; // +6
f64_vec2 B_out; // +2
NTrates ourrates; // +5
f64 kappa_parallel; // do we use them all at once or can we save 2 doubles here?
f64 nu; // 20 there
f64_vec2 edge_normal; // 22
f64_vec2 endpt_anti; // 24 .. + 6 from above
long indexneigh; // into the 2-double buffer in L1
f64_vec2 endpt_clock; // As we only use endpt_anti afterwards we could union endpt_clock with edge_normal
// Come back and optimize by checking which things we need in scope at the same time?
short iNeigh; // only fixed # of addresses so short makes no difference.
char PBC; // char makes no difference.
if ((bUseMask == 0) || (bMask[0] == true) || (bMask[1] == true) || (bMask[2] == true))
{
// Need this, we are adding on to existing d/dt N,NT :
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(Indexneigh + MAXNEIGH_d * threadIdx.x,
pIndexNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(long));
memcpy(PBCneigh + MAXNEIGH_d * threadIdx.x,
pPBCNeigh + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(PBCtri + MAXNEIGH_d * threadIdx.x,
szPBCtri_verts + MAXNEIGH_d * iVertex,
MAXNEIGH_d * sizeof(char));
memcpy(izTri, //+ MAXNEIGH_d * threadIdx.x,
izTri_verts + MAXNEIGH_d * iVertex, MAXNEIGH_d * sizeof(long));
}
if ((bUseMask == 0) || (bMask[0] == true)) // either there is no masking, or this is switched on
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// // [ Ignore flux into edge of outermost vertex I guess ???]
// long index0 = Indexneigh[MAXNEIGH_d * threadIdx.x + 0];
// long index1 = Indexneigh[MAXNEIGH_d * threadIdx.x + 1];
// long index2 = Indexneigh[MAXNEIGH_d * threadIdx.x + 2];
// long index3 = Indexneigh[MAXNEIGH_d * threadIdx.x + 3];
// long index4 = Indexneigh[MAXNEIGH_d * threadIdx.x + 4];
// printf("DEBUG: iVertex %d info.neigh_len %d izNeigh %d %d %d %d \n"
// "flags 0 %d %d %d %d \n"
// "positions (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// index0, index1, index2, index3,
// p_info_minor[index0+BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index1 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index2 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index3 + BEGINNING_OF_CENTRAL].flag,
// p_info_minor[index0 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index0 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index1 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index1 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index2 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index2 + BEGINNING_OF_CENTRAL].pos.y,
// p_info_minor[index3 + BEGINNING_OF_CENTRAL].pos.x, p_info_minor[index3 + BEGINNING_OF_CENTRAL].pos.y
// );
} else {
if (info.flag == DOMAIN_VERTEX) {
// Not sending blocks full of non-domain vertices is another possible optimization; fiddly with the indices though.
// Now do Tn:
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
} else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Tn;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
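// NOTE: p_T_k does not appear in this kernel's parameter list, so this #ifndef branch (and the
// matching ones below) will not compile unless p_T_k is supplied some other way; as written the
// kernel effectively assumes BWDSIDET is defined.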
T_clock = p_T_k[indexneigh].Tn;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Tn;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Tn; // ready for switch around
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Tn;
#endif
};
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Tn; // Stupid 3-struct
// Also need to update T_opp if it was not done already
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Tn;
};
#endif
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// Mimic
// Now let's see
// tri 0 has neighs 0 and 1 I'm pretty sure (check....) CHECK
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
// we should switch back to centroids!!
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
// How to detect? Loading a load of flags is a killer! We do need to load ... and this is why we should not have made the info struct. Definitely not.
////
//if (insulator triangle)
//{
// centroid1 = THIRD*(pos_anti + pos_out + info.pos);
// // project to radius of insulator
// centroid1.project_to_radius(3.44);
// // Now dot with unit vectors:
// f64_vec2 tempvec2;
// tempvec2.x = unit_vec1.x*centroid1.x + unit_vec1.y*centroid1.y;
// tempvec2.y = unit_vec2.x*centroid1.x + unit_vec2.y*centroid1.y;
// centroid1.x = tempvec2.x;
// centroid1.y = tempvec2.y;
//} else {
// // centroid1 = THIRD*(pos_anti_twist + pos_out_twist);
// centroid1.x = THIRD*(
// unit_vec1.x*(pos_anti.x - info.pos.x) + unit_vec1.y*(pos_anti.y - info.pos.y)
// + unit_vec1.x*(pos_out.x - info.pos.x) + unit_vec1.y*(pos_out.y - info.pos.y)
// );
// centroid1.y = THIRD*(
// - unit_vec1.y*(pos_anti.x - info.pos.x) + unit_vec1.x*(pos_anti.y - info.pos.y)
// - unit_vec1.y*(pos_out.x - info.pos.x) + unit_vec1.x*(pos_out.y - info.pos.y)
// );
//}
//if (insulator triangle)
//{
// centroid2 = THIRD*(pos_clock + pos_out + info.pos);
// // project to radius of insulator
//} else {
//}
kappa_parallel = 0.0;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_n[izTri[iNeigh]];
}
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_n[izTri[iPrev]];
}
}
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
// When we come to do the other species, make a subroutine.
if ((T_clock == 0.0) || (T_anti == 0.0)) {
f64 edgelen = edge_normal.modulus();
ourrates.NnTn += TWOTHIRDS * kappa_parallel * edgelen *
(T_out - shared_T[threadIdx.x]) / (pos_out - info.pos).modulus();
} else {
grad_T.x = 0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
ourrates.NnTn += TWOTHIRDS * kappa_parallel * grad_T.dot(edge_normal);
};
// This is correct, grad T in same coordinates as edge_normal...
};
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
};
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_i + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_i + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Ti;
// Notice major inefficiency caused by not making them scalar T arrays
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((bUseMask == 0) || (bMask[1] == true)) // either there is no masking, or this is switched on
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Ti;
#endif
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Ti;
#endif
};
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Ti;
#endif
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Ti;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Ti;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Ti;
};
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// Mimic
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
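// edge_normal is the 90-degree rotation of (endpt_anti - endpt_clock), so its magnitude equals the
// length of the edge joining the two adjacent triangle centres, and (assuming izTri is ordered
// anticlockwise around the vertex) it points outward from this cell toward the neighbour.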
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
} else {
kappa_parallel = 0.5*p_kappa_i[izTri[iNeigh]];
nu = 0.5*p_nu_i[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
} else {
kappa_parallel += 0.5*p_kappa_i[izTri[iPrev]];
nu += 0.5*p_nu_i[izTri[iPrev]];
}
}
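// kappa_parallel and nu are now the simple averages of the values on the two triangles
// (izTri[iNeigh] and izTri[iPrev]) that share this cell edge, each contributing half.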
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
} else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
// Use longitudinal:
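// The longitudinal form projects the magnetized conduction tensor onto the edge-normal direction
// twice: effective conductivity = kappa_par * (nu^2 + (omega.nhat)^2) / (nu^2 + |omega|^2),
// multiplied by (T_out - T_self)/delta_out and by the edge length. The antisymmetric (cross-field)
// part of the tensor drops out under the double projection.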
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
ourrates.NiTi += long_contrib;
} else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
grad_T.x = 0.5*(
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
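// contrib below is the anisotropic flux through this edge:
// (2/3) * kappa_par * edge_normal . [ (nu^2 I + omega omega^T + nu [omega]_x) grad_T ] / (nu^2 + |omega|^2),
// i.e. parallel, perpendicular and cross conduction combined. iso_contrib is the unmagnetized
// equivalent, and the two rules below clamp contrib so it never exceeds the isotropic flow
// nor points against it.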
f64 contrib = TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega));
// Rule 1. Not a greater flow than isotropic
// Rule 2. Not the opposite direction to isotropic - minimum zero
f64 iso_contrib = TWOTHIRDS * kappa_parallel *(edge_normal.x*grad_T.x + edge_normal.y*grad_T.y);
if (contrib > 0.0) {
if ((iso_contrib > 0.0) && (contrib > iso_contrib)) contrib = iso_contrib;
if (iso_contrib < 0.0) contrib = 0.0;
} else {
if ((iso_contrib < 0.0) && (contrib < iso_contrib)) contrib = iso_contrib;
if (iso_contrib > 0.0) contrib = 0.0;
}
//
// if (TESTHEATFULL) printf("%d iNeigh %d kappa_ion %1.8E nu %1.8E |o| %1.8E contrib %1.8E \n",
// iVertex, iNeigh, kappa_parallel, nu,
// omega.modulus(),
// TWOTHIRDS * kappa_parallel *(
// edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y + nu * omega.z)*grad_T.y)
// + edge_normal.y*((omega.x*omega.y - nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
// ) / (nu * nu + omega.dot(omega))
// );
//
if (bCheckWhetherToDoctorUp) {
// Now ask if this flow is going uphill:
bool b_out = p_bool_longi[indexneigh * 2];
bool b_here = p_bool_longi[iVertex * 2]; // 2 random reads --- we could put bools into shared easily
if (b_out || b_here) {
// use longitudinal flows
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// printf("ION %d : %d T T_out %1.8E %1.8E T_put T_putout %1.8E %1.8E cont %1.9E long %1.9E\n",
// iVertex, indexneigh, shared_T[threadIdx.x], T_out, T_here2, Tout2, contrib, long_contrib);
// if (((T_here2 < Tout2) && (contrib < 0.0)) || ((T_here2 > Tout2) && (contrib > 0.0))) {
// Either we are less but shrinking or more but growing
contrib = long_contrib;
};
};
ourrates.NiTi += contrib;
}; // scoping brace
}
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifndef BWDSIDET
T_clock = T_outk;
T_outk = T_anti;
#else
T_clock = T_out;
T_out = T_anti;
#endif
}; // next iNeigh
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
}; // mask
__syncthreads();
// Did we make sure to include a call to syncthreads every time we carried on to update shared memory data in every other routine?
// ##################################################################################################################################
{
memcpy(&(shared_kappa[threadIdx.x * 2]), p_kappa_e + 2 * iVertex, 2 * sizeof(f64));
memcpy(&(shared_nu[threadIdx.x * 2]), p_nu_e + 2 * iVertex, 2 * sizeof(f64));
}
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
shared_T[threadIdx.x] = p_T_major[iVertex].Te;
}
else {
shared_T[threadIdx.x] = 0.0;
}
__syncthreads();
if ((bUseMask) && (bMask[2] == 0)) return;
{
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
{
// [ Ignore flux into edge of outermost vertex I guess ???]
}
else {
if (info.flag == DOMAIN_VERTEX) {
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_clock = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_clock = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_clock = info2.pos;
#ifdef BWDSIDET
T_clock = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_clock = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == NEEDS_ANTI) {
pos_clock = Anticlock_rotate2(pos_clock);
};
if (PBC == NEEDS_CLOCK) {
pos_clock = Clockwise_rotate2(pos_clock);
};
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + 0];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_out = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_out = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_out = info2.pos;
#ifdef BWDSIDET
T_out = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_outk = p_T_k[indexneigh].Te;
#endif
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + 0];
if (PBC == NEEDS_ANTI) {
pos_out = Anticlock_rotate2(pos_out);
};
if (PBC == NEEDS_CLOCK) {
pos_out = Clockwise_rotate2(pos_out);
};
if ((izTri[info.neigh_len - 1] >= StartMinor) && (izTri[info.neigh_len - 1] < EndMinor))
{
endpt_clock = shared_pos[izTri[info.neigh_len - 1] - StartMinor];
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_clock = p_info_minor[izTri[info.neigh_len - 1]].pos;
#else
endpt_clock = p_cc[izTri[info.neigh_len - 1]];
#endif
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + info.neigh_len - 1];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_clock = Clockwise_d * endpt_clock;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_clock = Anticlockwise_d * endpt_clock;
//
// if (T_clock == 0.0) {
//#ifdef BWDSIDET
// T_clock = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_clock = T_outk;
//#endif
// };
// Mimic
#pragma unroll MAXNEIGH_d
for (iNeigh = 0; iNeigh < info.neigh_len; iNeigh++)
{
{
short iNext = iNeigh + 1; if (iNext == info.neigh_len) iNext = 0;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNext];
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNext];
}
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
pos_anti = shared_pos_verts[indexneigh - StartMajor];
#ifdef BWDSIDET
T_anti = shared_T[indexneigh - StartMajor];
#endif
}
else {
structural info2 = p_info_minor[indexneigh + BEGINNING_OF_CENTRAL];
pos_anti = info2.pos;
#ifdef BWDSIDET
T_anti = p_T_major[indexneigh].Te;
#endif
};
#ifndef BWDSIDET
T_anti = p_T_k[indexneigh].Te;
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
T_out = shared_T[indexneigh - StartMajor];
}
else {
T_out = p_T_major[indexneigh].Te;
}
#endif
if (PBC == NEEDS_ANTI) {
pos_anti = Anticlock_rotate2(pos_anti);
};
if (PBC == NEEDS_CLOCK) {
pos_anti = Clockwise_rotate2(pos_anti);
};
//
// if (T_anti == 0.0) {
//#ifdef BWDSIDET
// T_anti = 0.5*(shared_T[threadIdx.x] + T_out);
//#else
// T_anti = T_outk;
//#endif
// }; // So we are receiving 0 then doing this. But how come?
// mimic
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
endpt_anti = shared_pos[izTri[iNeigh] - StartMinor];
if (0) {
printf("%d : %d endpt_anti %1.9E %1.9E SHARED endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
else {
#ifdef CENTROID_HEATCONDUCTION
endpt_anti = p_info_minor[izTri[iNeigh]].pos;
#else
endpt_anti = p_cc[izTri[iNeigh]];
#endif
if (0) {
printf("%d : %d endpt_anti %1.9E %1.9E GLOBAL endpt_clock %1.9E %1.9E izTri[iNeigh] %d\n",
iVertex, iNeigh, endpt_anti.x, endpt_anti.y, endpt_clock.x, endpt_clock.y, izTri[iNeigh]);
}
}
PBC = PBCtri[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == ROTATE_ME_CLOCKWISE) endpt_anti = Clockwise_d * endpt_anti;
if (PBC == ROTATE_ME_ANTICLOCKWISE) endpt_anti = Anticlockwise_d * endpt_anti;
// It decided to rotate something it shouldn't oughta. Rotated tri 23600 = tri 2 for 11582.
edge_normal.x = (endpt_anti.y - endpt_clock.y);
edge_normal.y = (endpt_clock.x - endpt_anti.x);
// SMARTY:
if (TestDomainPos(pos_out))
{
kappa_parallel = 0.0;
f64 nu;
if ((izTri[iNeigh] >= StartMinor) && (izTri[iNeigh] < EndMinor))
{
kappa_parallel = 0.5*shared_kappa[izTri[iNeigh] - StartMinor];
nu = 0.5*shared_nu[izTri[iNeigh] - StartMinor];
}
else {
kappa_parallel = 0.5*p_kappa_e[izTri[iNeigh]];
nu = 0.5*p_nu_e[izTri[iNeigh]];
};
{
short iPrev = iNeigh - 1; if (iPrev < 0) iPrev = info.neigh_len - 1;
if ((izTri[iPrev] >= StartMinor) && (izTri[iPrev] < EndMinor))
{
kappa_parallel += 0.5*shared_kappa[izTri[iPrev] - StartMinor];
nu += 0.5*shared_nu[izTri[iPrev] - StartMinor];
}
else {
kappa_parallel += 0.5*p_kappa_e[izTri[iPrev]];
nu += 0.5*p_nu_e[izTri[iPrev]];
}
}
indexneigh = Indexneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
B_out = shared_B[indexneigh - StartMajor];
}
else {
f64_vec3 B_out3 = p_B_major[indexneigh];
B_out = B_out3.xypart();
}
PBC = PBCneigh[MAXNEIGH_d*threadIdx.x + iNeigh];
if (PBC == NEEDS_ANTI) B_out = Anticlock_rotate2(B_out);
if (PBC == NEEDS_CLOCK) B_out = Clockwise_rotate2(B_out);
if ((!TestDomainPos(pos_clock)) || (!TestDomainPos(pos_anti)))
{
// Use longitudinal:
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64_vec3 omega = Make3(qoverMc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qoverMc);
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
ourrates.NeTe += long_contrib;
if (TESTHEATFULL) printf("iVertex %d iNeigh %d long_contrib %1.14E T_out %1.9E ours %1.9E kappa_par %1.9E factor %1.9E\n",
iVertex, iNeigh, long_contrib,
T_out, shared_T[threadIdx.x], kappa_parallel,
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (edgelen*(nu * nu + omega.dot(omega))));
}
else {
f64 Area_quadrilateral = 0.5*(
(info.pos.x + pos_anti.x)*(info.pos.y - pos_anti.y)
+ (pos_clock.x + info.pos.x)*(pos_clock.y - info.pos.y)
+ (pos_out.x + pos_clock.x)*(pos_out.y - pos_clock.y)
+ (pos_anti.x + pos_out.x)*(pos_anti.y - pos_out.y)
);
grad_T.x = 0.5*(
(shared_T[threadIdx.x] + T_anti)*(info.pos.y - pos_anti.y)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.y - info.pos.y)
+ (T_out + T_clock)*(pos_out.y - pos_clock.y)
+ (T_anti + T_out)*(pos_anti.y - pos_out.y)
) / Area_quadrilateral;
grad_T.y = -0.5*( // notice minus
(shared_T[threadIdx.x] + T_anti)*(info.pos.x - pos_anti.x)
+ (T_clock + shared_T[threadIdx.x])*(pos_clock.x - info.pos.x)
+ (T_out + T_clock)*(pos_out.x - pos_clock.x)
+ (T_anti + T_out)*(pos_anti.x - pos_out.x)
) / Area_quadrilateral;
//kappa.xx = kappa_parallel * (nu_eHeart*nu_eHeart + omega.x*omega.x) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.xy = kappa_parallel * (omega.x*omega.y - nu_eHeart *omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yx = kappa_parallel * (omega.x*omega.y + nu_eHeart * omega.z) / (nu_eHeart * nu_eHeart + omega_sq);
//kappa.yy = kappa_parallel * (omega.y*omega.y + nu_eHeart * nu_eHeart) / (nu_eHeart * nu_eHeart + omega_sq);
{ // scoping brace
f64_vec3 omega = Make3(qovermc * 0.5*(shared_B[threadIdx.x] + B_out), BZ_CONSTANT*qovermc);
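// Note: for electrons omega uses qovermc (electron charge-to-mass), whereas the ion section above
// used qoverMc; since m_e << m_i the electron |omega| is far larger, so the contrast between
// parallel and cross-field conduction is much stronger here.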
// PROBABLY ALWAYS SPILLED INTO GLOBAL -- WHAT CAN WE DO?
f64 contrib = TWOTHIRDS * kappa_parallel *(
edge_normal.x*((nu*nu + omega.x*omega.x)*grad_T.x + (omega.x*omega.y - nu * omega.z)*grad_T.y)
+ edge_normal.y*((omega.x*omega.y + nu * omega.z)*grad_T.x + (omega.y*omega.y + nu * nu)*grad_T.y)
) / (nu * nu + omega.dot(omega));
// Rule 1. Not a greater flow than isotropic
// Rule 2. Not the opposite direction to isotropic - minimum zero
f64 iso_contrib = TWOTHIRDS * kappa_parallel *(edge_normal.x*grad_T.x + edge_normal.y*grad_T.y);
if (TESTHEATFULL) printf(
"iVertex %d iNeigh %d contrib %1.9E iso_contrib %1.9E \n"
"edge_normal %1.8E %1.8E \n"
"T %1.9E Tout %1.9E T_anti %1.9E T_clock %1.9E\n"
" kappa_par %1.9E nu %1.9E |omega| %1.9E Area %1.9E\n"
"our_n %1.9E our n_n %1.9E nearby n %1.9E %1.9E\n"
"pos %1.8E %1.8E opp %1.8E %1.8E anti %1.8E %1.8E clock %1.8E %1.8E\n"
"omega %1.8E %1.8E grad_T %1.9E %1.9E \n"
"=================================================\n"
, iVertex, iNeigh,
contrib, iso_contrib,
edge_normal.x, edge_normal.y, shared_T[threadIdx.x], T_out, T_anti, T_clock,
kappa_parallel, nu, sqrt(omega.dot(omega)),
p_AreaMajor[iVertex],
p_n_major[iVertex].n, p_n_major[iVertex].n_n, p_n_major[indexneigh].n, p_n_major[indexneigh].n_n,
info.pos.x, info.pos.y, pos_out.x, pos_out.y, pos_anti.x, pos_anti.y, pos_clock.x, pos_clock.y,
omega.x, omega.y, grad_T.x, grad_T.y);
if (TESTHEATFULL) printf("shared B[threadIdx.x] %1.10E %1.10E B_out %1.10E %1.10E\n",
shared_B[threadIdx.x].x, shared_B[threadIdx.x].y, B_out.x, B_out.y);
if (contrib > 0.0) {
if ((iso_contrib > 0.0) && (contrib > iso_contrib)) contrib = iso_contrib;
if (iso_contrib < 0.0) contrib = 0.0;
}
else {
if ((iso_contrib < 0.0) && (contrib < iso_contrib)) contrib = iso_contrib;
if (iso_contrib > 0.0) contrib = 0.0;
}
if (bCheckWhetherToDoctorUp) {
// Now ask if this flow is going uphill:
bool b_out = p_bool_longi[indexneigh * 2 + 1];
bool b_here = p_bool_longi[iVertex * 2 + 1];
if (b_out || b_here) {
// use longitudinal flows
f64 edgelen = edge_normal.modulus();
f64 delta_out = sqrt((info.pos.x - pos_out.x)*(info.pos.x - pos_out.x) + (info.pos.y - pos_out.y)*(info.pos.y - pos_out.y));
f64 long_contrib = TWOTHIRDS * kappa_parallel * (T_out - shared_T[threadIdx.x]) *
(nu*nu*edgelen*edgelen + omega.dotxy(edge_normal)*omega.dotxy(edge_normal))
/ (delta_out*edgelen*(nu * nu + omega.dot(omega)));
// printf("ELEC %d : %d T T_out %1.8E %1.8E T_put T_putout %1.8E %1.8E cont %1.9E long %1.9E\n",
// iVertex, indexneigh, shared_T[threadIdx.x], T_out, T_here2, Tout2, contrib, long_contrib);
// if (((T_here2 < Tout2) && (contrib < 0.0)) || ((T_here2 > Tout2) && (contrib > 0.0))) {
// Either we are less but shrinking or more but growing
contrib = long_contrib;
if (TESTHEATFULL) printf("contrib = long contrib %1.14E \n", contrib);
};
};
if (TESTHEATFULL) printf("iVertex %d ourrates.NeTe before: %1.14E contrib %1.12E\n", iVertex, ourrates.NeTe, contrib);
ourrates.NeTe += contrib;
if (TESTHEATFULL) printf("iVertex %d ourrates.NeTe after: %1.14E \n", iVertex, ourrates.NeTe);
}
};
} // if (pos_out.x*pos_out.x + pos_out.y*pos_out.y > ...)
// Now go round:
endpt_clock = endpt_anti;
pos_clock = pos_out;
pos_out = pos_anti;
#ifdef BWDSIDET
T_clock = T_out;
T_out = T_anti;
#else
T_clock = T_outk;
T_outk = T_anti;
#endif
}; // next iNeigh
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
}; // was it DOMAIN_VERTEX? Do what otherwise?
}; // was it OUTERMOST/INNERMOST?
};
}
__global__ void kernelCreatePutativeT(
f64 hsub,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_k,
// T3 * __restrict__ p_T_putative,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
bool * __restrict__ p_boolarray, // 2x NMAJOR
bool * __restrict__ p_bFailedtest,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskBlock, // do 1 for all species
bool bUseMask
)
{
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : %d %d \n", iVertex,
(bUseMask) ? 1 : 0, (p_bMaskBlock[blockIdx.x]) ? 1 : 0);
if ((bUseMask) && (p_bMaskBlock[blockIdx.x] == 0)) return;
bool bMask[3];
if (bUseMask) {
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + 2 * NUMVERTICES];
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : %d %d \n", iVertex,
(bMask[1]) ? 1 : 0, (bMask[2]) ? 1 : 0);
if ((bMask[1] == 0) && (bMask[2] == 0)) return; // we do nothing here with neutrals
};
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
T3 T_k = p_T_k[iVertex];
nvals n = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
NTrates NT = NTadditionrates[iVertex];
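// Putative forward-Euler temperatures: T_put = T_k + hsub * d(NT)/dt / N with N = n*AreaMajor.
// A negative T_put flags that species (via ourbools / bAlert below) as needing the masked solve.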
T3 T_put;
// T_put.Tn = T_k.Tn + hsub* NT.NeTe / (n.n_n*AreaMajor); // serves no purpose...
T_put.Ti = T_k.Ti + hsub*NT.NiTi / (n.n*AreaMajor);
T_put.Te = T_k.Te + hsub*NT.NeTe / (n.n*AreaMajor);
//if (iVertex == VERTCHOSEN) printf("%d T_e_k %1.8E NeTe %1.8E N %1.8E T_put %1.8E\n",
// iVertex, T_k.Te, NT.NeTe, (n.n*AreaMajor), T_put.Te);
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d : T_put.Te %1.10E NeTe %1.10E \n", iVertex,
T_put.Te, NT.NeTe);
bool ourbools[2];
bool bAlert = false;
memcpy(ourbools, p_boolarray + 2 * iVertex, sizeof(bool) * 2);
//if (iVertex == VERTCHOSEN) printf("%d Te_putative %1.10E NT.NeTe %1.10E ourbool %d bAlert %d\n", iVertex, T_put.Te, NT.NeTe,
// ourbools[1]?1:0, bAlert?1:0);
if (((bUseMask == 0) || (bMask[1] == true)) && (T_put.Ti < 0.0)) {
if (ourbools[0] == 0) bAlert = true;
ourbools[0] = true;
};
if (((bUseMask == 0) || (bMask[2] == true)) && (T_put.Te < 0.0)) {
if (ourbools[1] == 0) bAlert = true;
ourbools[1] = true;
};
if (iVertex == VERTCHOSEN) printf("kernelCreatePutative %d :ourbools[1] %d \n", iVertex,
(ourbools[1]) ? 1 : 0);
memcpy(p_boolarray + 2 * iVertex, ourbools, sizeof(bool) * 2);
if (bAlert) p_bFailedtest[blockIdx.x] = true;
}
__global__ void kernelReturnNumberNegativeT(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T,
long * __restrict__ p_sum
)
{
__shared__ long sum[threadsPerTileMajorClever];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
sum[threadIdx.x] = 0;
if (info.flag == DOMAIN_VERTEX) {
T3 T = p_T[iVertex];
if ((T.Tn < 0.0) || (T.Ti < 0.0) || (T.Te < 0.0))
{
printf("iVertex %d T %1.8E %1.8E %1.8E flag %d pos %1.10E %1.10E\n", iVertex, T.Tn, T.Ti, T.Te, info.flag,
info.pos.x, info.pos.y);
sum[threadIdx.x] = 1;
// Really does find only 1 -- 19498 Ti. Could spit out more about
// why it happened.
// So is there an alternative?
}
// worth it? can we easier/better just blitz the out-of-domain T to 0 and load it?
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum[threadIdx.x] += sum[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum[threadIdx.x] += sum[s - 1]; // fold in the unpaired last element when s is odd
};
// e.g. s == 81, k == 40: the pairwise adds above only reach [39] += [79],
// so thread k-1 also picks up [s-1] = [80] here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum[blockIdx.x] = sum[0];
};
}
/*
__global__ void kernelSetNeighboursBwd(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izNeigh_vert,
bool * __restrict__ p_bMask3)
{
Won't work because it needs separate src and dest memory.
__shared__ bool bMask[threadsPerTileMajorClever][3];
long const iVertex = threadIdx.x + blockIdx.x*blockDim.x;
memcpy(bMask[threadIdx.x], p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
long const StartMajor = blockIdx.x*blockDim.x;
long const EndMajor = StartMajor + blockDim.x;
// check row-major meaning.
__syncthreads();
bool bMask3[3], bMaskNeigh[3];
bMask3[0] = bMask[threadIdx.x][0];
bMask3[1] = bMask[threadIdx.x][1];
bMask3[2] = bMask[threadIdx.x][2];
structural info = p_info_minor[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
long izNeigh[MAXNEIGH_d];
memcpy(izNeigh, p_izNeigh_vert + MAXNEIGH_d*iVertex, sizeof(long)*MAXNEIGH_d);
#pragma unroll MAXNEIGH_d
for (int i = 0; (i < info.neigh_len); i++)
{
long indexneigh = izNeigh[i];
if ((indexneigh >= StartMajor) && (indexneigh < EndMajor))
{
memcpy(bMaskNeigh, bMask[indexneigh - StartMajor], sizeof(bool) * 3);
} else {
memcpy(bMaskNeigh, p_bMask3 + 3 * indexneigh, sizeof(bool) * 3);
};
if (bMaskNeigh[0]) bMask3[0] = true;
if (bMaskNeigh[1]) bMask3[1] = true;
if (bMaskNeigh[2]) bMask3[2] = true;
};
if ((bMaskNeigh[0]) || (bMaskNeigh[1]) || (bMaskNeigh[2])) {
memcpy(p_bMask3 + 3 * iVertex, bMask, sizeof(bool) * 3);
// otherwise, it was 0 to start with; let it still be 0.
};
}
}
*/
__global__ void kernelSetBlockMaskFlag_CountEquations_reset_Tk(
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskBlock,
long * __restrict__ p_longblock3,
T3 * __restrict__ p_T_k,
T3 * __restrict__ p_T
)
{
__shared__ bool bAlert[3];
__shared__ long sum0[threadsPerTileMajorClever];
__shared__ long sum1[threadsPerTileMajorClever];
__shared__ long sum2[threadsPerTileMajorClever]; // need to save all 3 values
if (threadIdx.x < 3)
bAlert[threadIdx.x] = 0;
sum0[threadIdx.x] = 0;
sum1[threadIdx.x] = 0;
sum2[threadIdx.x] = 0;
__syncthreads();
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
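// p_bMask3 is laid out species-major: [0, NUMVERTICES) neutral, [NUMVERTICES, 2*NUMVERTICES) ion,
// [2*NUMVERTICES, 3*NUMVERTICES) electron -- hence the strided reads below rather than the
// commented-out interleaved memcpy.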
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES*2];
if (bMask[0]) {
bAlert[0] = true;
sum0[threadIdx.x]++;
};
if (bMask[1]) {
bAlert[1] = true;
sum1[threadIdx.x]++;
printf("Ion: %d\n", iVertex);
};
if (bMask[2]) {
bAlert[2] = true; // maybe this does not work.
sum2[threadIdx.x]++;
printf("Elec: %d | ", iVertex);
};
if ((bMask[0]) || (bMask[1]) || (bMask[2]))
{
T3 T = p_T[iVertex];
T3 Tk = p_T_k[iVertex];
if (bMask[0]) T.Tn = Tk.Tn;
if (bMask[1]) T.Ti = Tk.Ti;
if (bMask[2]) T.Te = Tk.Te;
p_T[iVertex] = T;
}
__syncthreads();
// if (iVertex == VERTCHOSEN) printf(" %d bAlert %d %d %d \n",
// iVertex, bAlert[0] ? 1 : 0, bAlert[1] ? 1 : 0, bAlert[2] ? 1 : 0);
if (threadIdx.x == 0) {
p_bMaskBlock[blockIdx.x] = (bAlert[0] || bAlert[1] || bAlert[2]);
}
// all this doing but we want to split into species solves anyway.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum0[threadIdx.x] += sum0[threadIdx.x + k];
sum1[threadIdx.x] += sum1[threadIdx.x + k];
sum2[threadIdx.x] += sum2[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum0[threadIdx.x] += sum0[s - 1]; // fold in the unpaired last element when s is odd
sum1[threadIdx.x] += sum1[s - 1];
sum2[threadIdx.x] += sum2[s - 1];
};
// e.g. s == 81, k == 40: the pairwise adds above only reach [39] += [79],
// so thread k-1 also picks up [s-1] = [80] here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_longblock3[blockIdx.x*3] = sum0[0];
p_longblock3[blockIdx.x*3+1] = sum1[0];
p_longblock3[blockIdx.x*3+2] = sum2[0];
};
}
__global__ void kernelCompareForStability_andSetFlag(
structural * __restrict__ p_info_minor,
NTrates * __restrict__ p_NTrates1,
NTrates * __restrict__ p_NTrates2,
long * __restrict__ p_sum,
bool * __restrict__ p_bMask3
)
{
__shared__ long sum[threadsPerTileMajorClever];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
bool bMask[3];
sum[threadIdx.x] = 0;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
if (info.flag == DOMAIN_VERTEX) {
NTrates dNTdt1 = p_NTrates1[iVertex];
NTrates dNTdt2 = p_NTrates2[iVertex];
//memcpy(bMask, p_bMask3 + iVertex * 3, sizeof(bool) * 3);
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES * 2];
// we want to check if 2 is greater magnitude than 1 and reversed sign
if ((dNTdt2.NnTn*dNTdt1.NnTn < 0.0)
&& (fabs(dNTdt2.NnTn) > fabs(dNTdt1.NnTn)))
{
sum[threadIdx.x]++;
bMask[0] = 1;
}
if ((dNTdt2.NiTi*dNTdt1.NiTi < 0.0)
&& (fabs(dNTdt2.NiTi) > fabs(dNTdt1.NiTi))) {
sum[threadIdx.x]++;
bMask[1] = 1;
}
if ((dNTdt2.NeTe*dNTdt1.NeTe < 0.0)
&& (fabs(dNTdt2.NeTe) > fabs(dNTdt1.NeTe))) {
sum[threadIdx.x]++;
bMask[2] = 1;
};
p_bMask3[iVertex] = bMask[0];
p_bMask3[iVertex + NUMVERTICES] = bMask[1];
p_bMask3[iVertex + NUMVERTICES * 2] = bMask[2];
};
// non domain mask flags already set to 0
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum[threadIdx.x] += sum[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum[threadIdx.x] += sum[s - 1]; // fold in the unpaired last element when s is odd
};
// e.g. s == 81, k == 40: the pairwise adds above only reach [39] += [79],
// so thread k-1 also picks up [s-1] = [80] here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum[blockIdx.x] = sum[0];
};
}
__global__ void kernelCreatePutativeTandsave(
f64 hsub,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_k,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
T3 * __restrict__ p_T_dest,
bool * bMask3
)
{
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX // 2.5 double
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL]; // 3 double
bool bMask[3];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
T3 T_k = p_T_k[iVertex];
nvals n = p_n_major[iVertex];
f64 AreaMajor = p_AreaMajor[iVertex];
NTrates NT = NTadditionrates[iVertex];
T3 T_put;
T_put.Tn = T_k.Tn + hsub*NT.NnTn / (n.n_n*AreaMajor); // neutral heating rate for the neutral putative T
T_put.Ti = T_k.Ti + hsub*NT.NiTi / (n.n*AreaMajor);
T_put.Te = T_k.Te + hsub*NT.NeTe / (n.n*AreaMajor);
p_T_dest[iVertex] = T_put;
memset(bMask, 0, sizeof(bool) * 3);
if (T_put.Tn < 0.0) bMask[0] = 1;
if (T_put.Ti < 0.0) bMask[1] = 1;
if (T_put.Te < 0.0) bMask[2] = 1;
} else {
memset(bMask, 0, sizeof(bool) * 3);
}
if (iVertex == 22351) printf("22351 info.flag %d bMask %d %d %d \n",
info.flag, (bMask[0] ? 1 : 0), (bMask[1] ? 1 : 0), (bMask[2] ? 1 : 0));
bMask3[iVertex] = bMask[0];
bMask3[iVertex + NUMVERTICES] = bMask[1];
bMask3[iVertex + 2 * NUMVERTICES] = bMask[2];
//memcpy(bMask3 + iVertex * 3, bMask, sizeof(bool) * 3);
}
__global__ void kernelIonisationRates_Forward_Euler(
f64 const h_use,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_major,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// We are in major cells so actually output this to a fresh temp array (9 scalars)
// which we then share out into minor cells.
v4 * __restrict__ p_v,
f64_vec3 * __restrict__ p_v_n,
T3 * __restrict__ p_T_use_major,
bool b_useTuse
)
// ** SIMPLIFIED VERSION **
{
#define SAFETY_FACTOR 1.2
#define LEEWAY 1.0e-23
#define vAC 218687393.0 // Alfven Critical velocity = sqrt(13.6*1.6e-12*2/me)
long const iVertex = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
NTrates ourrates;
f64_vec3 MAR_neut, MAR_ion, MAR_elec;
v4 v;
f64_vec3 v_n;
f64 T_use;
if (info.flag == DOMAIN_VERTEX)
{
// case DOMAIN_VERTEX:
f64 lambda;
f64 AreaMajor = p_AreaMajor[iVertex];
T3 T_k = p_T_major[iVertex];
if (b_useTuse) {
T3 T = p_T_use_major[iVertex];
T_use = T.Te;
}
else {
T_use = T_k.Te;
}
nvals our_n = p_n_major[iVertex];
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(&MAR_neut, p_MAR_neut + iVertex, sizeof(f64_vec3)); // are we passing stuff from central then?
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3)); // it does mean d/dt (Nv)
memcpy(&v, p_v + iVertex, sizeof(v4));
memcpy(&v_n, p_v_n + iVertex, sizeof(f64_vec3));
// 0 . What is lambda?
f64 oldT1;
f64 n_k = our_n.n;
f64 n_n_k = our_n.n_n;
f64 n_kplus1, n_n_kplus1, n_kplus2;
f64 Gamma_ion, Gamma_rec, hn, hnn, Delta_ionise, Delta_rec;
// lambda = 0.5*reduced mass*w0.dot(w0) / T_k.Te;
f64 w0z = v.vez - v_n.z;
// What is capital Theta of T_k ?
//f64 w = sqrt(w0z*w0z); // WE ARE ONLY USING Z DIMENSION FOR ABSORBING KINETIC ENERGY
// Check again: how did we come up with the following formulas?
// Off of the lambda spreadsheet or the v spreadsheet? I think lambda.
f64 T_use_theta = T_k.Te;
if (T_use_theta < 1.0e-12) T_use_theta = 1.0e-12;
f64 Theta = (1.1 + 0.4e-12 / T_use_theta);
if (w0z < vAC - 0.4e-4 / T_use_theta) {
//Theta *= exp(-w*(vC - 0.4e-4 / T_use_theta - w)*1.0e-12
// / (0.25*(vC - 0.4e-4 / T_use_theta)*(vC - 0.4e-4 / T_use_theta)*T_use_theta));
// Multiply through to save on divisions?:
Theta *= exp(-w0z*((vAC - w0z)* T_use_theta - 0.4e-4)*1.0e-12 /
(0.25*(vAC* T_use_theta - 0.4e-4)*(vAC* T_use_theta - 0.4e-4)));
};
// Available KE:
f64 Kconv = 0.5*m_e*m_n*n_k*n_n_k*(w0z*w0z) / (m_e*n_k + m_n*n_n_k);
f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
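// coeff_on_ionizing is (apparently) the change in electron nT per particle ionized, before the later
// energy balance: it gains half the neutral temperature 0.5*Tn and pays the share of the 13.6 eV
// ionization cost borne by electron thermal energy, namely (2/3)*13.6*kB scaled by
// 3*n*Te / (3*n*Te + 2*Theta*Kconv); the remainder is presumed drawn from the convertible drift KE Kconv.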
// Now compute f(Tk) = T_k+1 given using T_k
f64 w = sqrt(0.5*(w0z*w0z + (v.vxy.x - v_n.x)*(v.vxy.x - v_n.x) + (v.vxy.y - v_n.y)*(v.vxy.y - v_n.y))); // CORRECTION FACTOR 0.5 ...
f64 T_image1, T2, T_image2, T_oldimage1, Tkplus2minus1;
hn = h_use*n_k;
hnn = h_use*n_k*n_k;
f64 T1 = T_use; // first go. = Tk if b_useTuse == false.
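// The block below takes one semi-implicit step of the coupled ionization/recombination pair:
//   Delta_ionise = h*n_k*Gamma_ion   * (n_n_k - Delta_ionise + Delta_rec)
//   Delta_rec    = h*n_k^2*Gamma_rec * (n_k   + Delta_ionise - Delta_rec)
// i.e. the rates are linear in the updated densities while Gamma and the n_k, n_k^2 prefactors are
// frozen at step k. Solving this 2x2 linear system gives the closed forms used for Delta_ionise and
// Delta_rec; the shared denominator is the determinant (1+hn*Gi)(1+hnn*Gr) - hnn*Gr*hn*Gi.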
{
Gamma_ion = GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
T2 = T_image1;
// Skip over algorithm:
if (Delta_ionise != Delta_ionise) printf("Nandelta %d Tuse %1.10E w %1.8E Gamma %1.10E rec %1.10E w0z %1.10E Kconv %1.10E\n", iVertex, T_use, w, Gamma_ion, Gamma_rec, w0z, Kconv);
//if (iVertex == 16700) printf("Delta_ionise %1.10E rec %1.10E \n", Delta_ionise, Delta_rec);
//f64 TeeV = T1/ kB;
//f64 Tesq = TeeV*TeeV;
//f64 Te3 = TeeV*Tesq;
//f64 Te4 = Tesq*Tesq;
//f64 calc1 = (ionize_coeffs[0][0][4] + ionize_coeffs[0][0][3] * TeeV
// + ionize_coeffs[0][0][2] * Tesq + ionize_coeffs[0][0][1] * Te3
// + ionize_coeffs[0][0][0] * Te4);
//f64 calc2 = (ionize_coeffs[0][0][0] + ionize_coeffs[0][0][1] * TeeV
// + ionize_coeffs[0][0][2] * Tesq + ionize_coeffs[0][0][3] * Te3
// + ionize_coeffs[0][0][4] * Te4);
//if (iVertex == 16700) printf("ionize_coeffs[0][0] %1.12E %1.12E %1.12E %1.12E %1.12E \n"
// "TeeV %1.12E calc1 %1.12E calc2 %1.12E exp(calc1) %1.12E exp(calc2) %1.12E\n",
// ionize_coeffs[0][0][0], ionize_coeffs[0][0][1], ionize_coeffs[0][0][2], ionize_coeffs[0][0][3], ionize_coeffs[0][0][4],
// TeeV, calc1, calc2, exp(calc1), exp(calc2));
f64 dNdt_ionise = AreaMajor*Delta_ionise / h_use;
f64 dNdt_recombine = AreaMajor*Delta_rec / h_use;
ourrates.N += dNdt_ionise - dNdt_recombine;
ourrates.Nn += dNdt_recombine - dNdt_ionise;
// Store existing energy density:
f64 Energy_k = 1.5*(n_k*(T_k.Te + T_k.Ti) + n_n_k*T_k.Tn) +
0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n));
// 1. Calculate kinetic energy absorption impact on vez, vnz
// ie Ionization resistance to current
n_kplus1 = n_k + Delta_ionise - Delta_rec;
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
// Absorbed DKE:
f64 deltaKE = -(2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv))*Delta_ionise*13.6*kB;
f64 new_vz_diff = sqrt(m_e*n_kplus1 + m_n*n_n_kplus1*
((n_k*n_n_k / (m_e*n_k + m_n*n_n_k))*(w0z*w0z) + 2.0*deltaKE / (m_e*m_n)) /
n_kplus1*n_n_kplus1);
f64 delta_vez = m_n*n_n_kplus1*(w0z + new_vz_diff) /
(m_n*n_n_kplus1 + m_e*n_kplus1);
f64 delta_vnz = -m_e*n_kplus1*delta_vez / (m_n*n_n_kplus1);
// Check: w0 = vez-vnz - tick
// should change to scalar.
MAR_neut.z += AreaMajor*n_n_kplus1*delta_vnz / h_use;
MAR_elec.z += AreaMajor*n_kplus1*delta_vez / h_use;
f64_vec3 ve_kplus1, vi_kplus1, vn_kplus1;
// Store alongside: v_k+1 so that we can follow the anticipated change in energy,
// to create energy balance:
ve_kplus1.x = v.vxy.x*(n_k / n_kplus1);
ve_kplus1.y = v.vxy.y*(n_k / n_kplus1);
ve_kplus1.z = v.vez*(n_k / n_kplus1) + delta_vez; // we need to store v, we could also store nv if we wanted.
vi_kplus1.x = v.vxy.x*(n_k / n_kplus1);
vi_kplus1.y = v.vxy.y*(n_k / n_kplus1);
vi_kplus1.z = v.viz*(n_k / n_kplus1);
vn_kplus1 = v_n*(n_n_k / n_n_kplus1);
vn_kplus1.z += delta_vnz;
// 2. Add the effect of xfers on momenta:
// What does MAR_neut mean? Nv?
{
f64_vec3 v_use;
v_use.x = v.vxy.x;
v_use.y = v.vxy.y;
v_use.z = (m_e*v.vez + m_i*v.viz) / (m_e + m_i);
MAR_neut += -dNdt_ionise*v_n + dNdt_recombine*v_use;
MAR_ion += dNdt_ionise*v_n - dNdt_recombine*v_use;
MAR_elec += dNdt_ionise*v_n - dNdt_recombine*v_use;
vn_kplus1 -= (Delta_ionise*v_n - Delta_rec*v_use) / n_n_kplus1;
// n_k+1 v_k+1 = n_k v_k + Delta_n*v_use => v_k+1 = (n_k/n_k+1) v_k + (Delta_n/n_k+1) v_use
vi_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
ve_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
}
if (MAR_elec.z != MAR_elec.z) printf("ivertex %d MAR_elec nan\n", iVertex);
// . Ionization cooling & recombination heating
//f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// ourrates.NeTe +=
// dNdt_recombine*2.0*13.6*kB / 3.0
// - (2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv))*dNdt_ionise;
// We can drop this: it will be accounted for by the final energy balance.
// 3. Add to nT for x-fers due to species converting
ourrates.NiTi += 0.5*dNdt_ionise*T_k.Tn;
ourrates.NeTe += 0.5*dNdt_ionise*T_k.Tn;
ourrates.NnTn -= dNdt_ionise*T_k.Tn;
f64 nTe_kplus1 = T_k.Te*n_k + 0.5*Delta_ionise*T_k.Tn;
f64 nTi_kplus1 = T_k.Ti*n_k + 0.5*Delta_ionise*T_k.Tn;
f64 n_nTn_kplus1 = T_k.Tn*n_n_k - Delta_ionise*T_k.Tn;
// 4. Energy balance through Te:
// Maybe we should rather be seeking OVERALL energy balance where KE_result is from n_k+1, v_k+1
// and we ensure that we have lost the right amount of energy overall.
// That is the better way:
f64 KE_result = 0.5*(m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + m_i*n_kplus1*vi_kplus1.dot(vi_kplus1)
+ m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1));
f64 Energy_density_kplus1 = KE_result + 1.5*(nTe_kplus1 + nTi_kplus1 + n_nTn_kplus1);
f64 Energy_density_target = Energy_k - 13.6*kB*(Delta_ionise - Delta_rec);
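// Energy balance: the target total energy density is the old one minus 13.6 eV per net ionization.
// Any mismatch against the provisional k+1 state is dumped into electron heat below:
// since heat energy density = (3/2) n T, d(N Te)/dt receives Area * (2/3) * (target - provisional) / h.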
// Additional_heat = (KE_k + deltaKE) - KE_result; // usually positive
// 1*1+3*3 > 2*2 + 2*2 so KE is generally decreasing by friction; KE_result < KE_k+deltaKE
// KE_result + Added_heat + existing heat = desired total energy = KE_k + heat_k + deltaKE
// 1.5 nT += Frictional_heating
// NTe += (2/3) Area Frictional_heating
ourrates.NeTe += 2.0*AreaMajor*
(Energy_density_target - Energy_density_kplus1) / (3.0*h_use);
// All this stuff is wrong - see full routine.
// DEBUG:
if (TEST_IONIZE) printf("iVertex %d n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E h*NeTe %1.9E \n"
"Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
"Delta_ionise %1.9E rec %1.9E \n",
iVertex, n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
Delta_ionise, Delta_rec
);
// DEBUG:
if (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use < 0.0)
printf("%d Predicted Te %1.9E \n", iVertex, (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use)/(n_k*AreaMajor));
// DEBUG:
if ((ourrates.NeTe != ourrates.NeTe)) printf("Nan NeTe %d \n", iVertex);
if (MAR_elec.z != MAR_elec.z) printf("Nan MAR_elec.z %d \n", iVertex);
if (MAR_elec.x != MAR_elec.x) printf("Nan MAR_elec.x %d \n", iVertex);
if (MAR_neut.x != MAR_neut.x) printf("Nan MAR_neut.x %d \n", iVertex);
if (MAR_ion.y != MAR_ion.y) printf("Nan MAR_ion.y %d \n", iVertex);
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
memcpy(p_MAR_neut + iVertex, &MAR_neut, sizeof(f64_vec3));
memcpy(p_MAR_ion + iVertex, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex, &MAR_elec, sizeof(f64_vec3));
//******************************************************************************************************
//// f64 TeV = T.Te * one_over_kB;
//// We loaded in ourrates.NT which indicates the new heat available so we should include some of that.
//// The main impact will be from heat conduction; dN/dt due to advection neglected here.
//f64 TeV = one_over_kB * (T.Te*our_n.n*AreaMajor + h_use*ourrates.NeTe)/
// (our_n.n*AreaMajor + h_use*ourrates.N);
//// Should be very careful here: ourrates.NeTe can soak to neutrals on timescale what? 1e-11?
//if (TeV < 0.0) {
// printf("\n\niVertex %d : ourrates.N %1.14E denominator %1.14E \n"
// " AreaMajor %1.14E TeV %1.14E ourrates.NeTe %1.10E h %1.10E \n"
// "ourrates.Nn %1.10E n %1.10E n_n %1.10E Te %1.10E Tn %1.10E \n\n",
// iVertex, ourrates.N,
// (our_n.n*AreaMajor + h_use*ourrates.N),
// AreaMajor, TeV, ourrates.NeTe, h_use,
// ourrates.Nn, our_n.n, our_n.n_n, T.Te, T.Tn);
//
//}
//f64 sqrtT = sqrt(TeV);
//f64 temp = 1.0e-5*exp(-13.6 / TeV) / (13.6*(6.0*13.6 + TeV)); // = S / T^1/2
// // Let h n n_n S be the ionising amount,
// // h n S is the proportion of neutrals! Make sure we do not run out!
////f64 hnS = (h_use*our_n.n*TeV*temp) / (sqrtT + h_use * our_n.n_n*temp*SIXTH*13.6);
// // d/dt (sqrtT) = 1/2 dT/dt T^-1/2.
// // dT[eV]/dt = -TWOTHIRDS * 13.6* n_n* sqrtT *temp
// // d/dt (sqrtT) = -THIRD*13.6*n_n*temp;
//// kind of midpoint, see SIXTH not THIRD:
//f64 Model_of_T_to_half = TeV / (sqrtT + h_use*SIXTH*13.6*our_n.n_n*temp / (1.0 - h_use*(our_n.n_n - our_n.n)*temp*sqrtT));
//f64 hS = h_use*temp*Model_of_T_to_half;
//
//// NEW:
//f64 ionise_rate = AreaMajor * our_n.n_n * our_n.n*hS /
// (h_use*(1.0 + hS*(our_n.n-our_n.n_n))); // dN/dt
//ourrates.N += ionise_rate;
//ourrates.Nn += -ionise_rate;
//// Let nR be the recombining amount, R is the proportion.
//TeV = T.Te * one_over_kB;
//f64 Ttothe5point5 = sqrtT * TeV * TeV*TeV * TeV*TeV;
//f64 hR = h_use * (our_n.n * our_n.n*8.75e-27*TeV) /
// (Ttothe5point5 + h_use * 2.25*TWOTHIRDS*13.6*our_n.n*our_n.n*8.75e-27);
//// T/T^5.5 = T^-4.5
//// T/(T^5.5+eps) < T^-4.5
//// For some reason I picked 2.25 = 4.5/2 instead of 5.5/2.
//// But basically it looks reasonable.
//// Maybe the additional stuff is an estimate of the change in T[eV]^5.5??
//// d/dt T^5.5 = 5.5 T^4.5 dT/dt
//// dT/dt = TWOTHIRDS * 13.6*( hR / h_use) = TWOTHIRDS * 13.6*( n^2 8.75e-27 T^-4.5)
//// d/dt T^5.5 = 5.5 TWOTHIRDS * 13.6*( n^2 8.75e-27 )
//f64 recomb_rate = AreaMajor * our_n.n * hR / h_use; // could reasonably again take hR/(1+hR) for n_k+1
//ourrates.N -= recomb_rate;
//ourrates.Nn += recomb_rate;
//if (TEST) printf("%d recomb rate %1.10E ionise_rate %1.10E our_n.n %1.10E nn %1.10E hR %1.10E hS %1.10E\n"
// "h_use %1.8E sqrtTeV %1.10E Ttothe5point5 %1.9E Te %1.9E modelThalf %1.9E\n", iVertex,
// recomb_rate, ionise_rate, our_n.n, our_n.n_n, hR, hS, h_use, sqrtT, Ttothe5point5, T.Te, Model_of_T_to_half);
//ourrates.NeTe += -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate;
//ourrates.NiTi += 0.5*T.Tn*ionise_rate;
//ourrates.NnTn += (T.Te + T.Ti)*recomb_rate;
//if (TEST) {
// printf("kernelIonisation %d NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "due to I+R : NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "d/dtNeTe/N %1.9E d/dtNiTi/N %1.9E d/dtNnTn/Nn %1.9E \n\n",
// iVertex, ourrates.NeTe, ourrates.NiTi, ourrates.NnTn,
// -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate,
// 0.5*T.Tn*ionise_rate,
// (T.Te + T.Ti)*recomb_rate,
// ourrates.NeTe / (our_n.n*AreaMajor), ourrates.NiTi / (our_n.n*AreaMajor), ourrates.NnTn / (our_n.n_n*AreaMajor));
//};
//memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
};
}
__global__ void kernelIonisationRates(
f64 const h_use,
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_major,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
NTrates * __restrict__ NTadditionrates,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// We are in major cells so actually output this to a fresh temp array (9 scalars)
// which we then share out into minor cells.
v4 * __restrict__ p_v,
f64_vec3 * __restrict__ p_v_n,
T3 * __restrict__ p_T_use_major,
bool b_useTuse
)
{
#define SAFETY_FACTOR 1.2
#define LEEWAY 1.0e-23
#define vAC 218687393.0 // Alfven Critical velocity = sqrt(13.6*1.6e-12*2/me)
long const iVertex = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
NTrates ourrates;
f64_vec3 MAR_neut, MAR_ion, MAR_elec;
v4 v;
f64_vec3 v_n;
f64 T_use;
bool bZero_out = false;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
// case DOMAIN_VERTEX:
f64 lambda;
f64 AreaMajor = p_AreaMajor[iVertex];
T3 T_k = p_T_major[iVertex];
if (b_useTuse) {
T3 T = p_T_use_major[iVertex];
T_use = T.Te;
} else {
T_use = T_k.Te;
}
nvals our_n = p_n_major[iVertex];
f64 fac_uplift = ArtificialUpliftFactor(our_n.n, our_n.n_n);
memcpy(&ourrates, NTadditionrates + iVertex, sizeof(NTrates));
memcpy(&MAR_neut, p_MAR_neut + iVertex, sizeof(f64_vec3)); // are we passing stuff from central then?
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3)); // it does mean d/dt (Nv)
memcpy(&v, p_v + iVertex, sizeof(v4));
memcpy(&v_n, p_v_n + iVertex, sizeof(f64_vec3));
if (TEST_IONIZE) printf("iVertex %d ourrates.NeTe original %1.10E \n", iVertex, ourrates.NeTe);
// 0 . What is lambda?
f64 oldT1;
f64 n_k = our_n.n;
f64 n_n_k = our_n.n_n;
f64 n_kplus1, n_n_kplus1, n_kplus2;
f64 Gamma_ion, Gamma_rec, hn, hnn, Delta_ionise, Delta_rec;
// lambda = 0.5*reduced mass*w0.dot(w0) / T_k.Te;
f64 w0z = v.vez - v_n.z;
// What is capital Theta of T_k ?
//f64 w = sqrt(w0z*w0z); // WE ARE ONLY USING Z DIMENSION FOR ABSORBING KINETIC ENERGY
// Check again: how did we come up with the following formulas?
// Off of the lambda spreadsheet or the v spreadsheet? I think lambda.
f64 T_use_theta = T_k.Te;
if (T_use_theta < 1.0e-12) T_use_theta = 1.0e-12;
f64 Theta = (1.1 + 0.4e-12 / T_use_theta);
if (w0z < vAC - 0.4e-4 / T_use_theta) {
//Theta *= exp(-w*(vC - 0.4e-4 / T_use_theta - w)*1.0e-12
// / (0.25*(vC - 0.4e-4 / T_use_theta)*(vC - 0.4e-4 / T_use_theta)*T_use_theta));
// Multiply through to save on divisions?:
Theta *= exp(-w0z*((vAC - w0z)* T_use_theta - 0.4e-4)*1.0e-12 /
(0.25*(vAC* T_use_theta - 0.4e-4)*(vAC* T_use_theta - 0.4e-4)));
};
// Available KE:
f64 Kconv = 0.5*m_e*m_n*n_k*n_n_k*(w0z*w0z) / (m_e*n_k + m_n*n_n_k);
if (TEST_IONIZE) printf("iVertex %d w0z %1.10E Kconv %1.10E Theta %1.9E \n", iVertex,
w0z, Kconv, Theta);
f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// Now compute f(Tk) = T_k+1 given using T_k
f64 w = sqrt(0.5*(w0z*w0z + (v.vxy.x - v_n.x)*(v.vxy.x - v_n.x) + (v.vxy.y - v_n.y)*(v.vxy.y - v_n.y))); // CORRECTION FACTOR 0.5 ...
// ================
// Note: the saved data for v are sqrt(2) times too large because the 0.5 was omitted from lambda,
// so the data labelled "1e7" actually correspond to 1.4e7.
// Hence we pass 1/sqrt(2) times our velocity, via the 0.5 correction factor inside the sqrt above.
f64 T_image1, T2, T_image2, T_oldimage1, Tkplus2minus1;
hn = h_use*n_k;
hnn = h_use*n_k*n_k;
f64 T1 = T_use; // first go. = Tk if b_useTuse == false.
{
// if (TEST_IONIZE) {
// Gamma_ion = GetIonizationRatesDebug(T1, w, &Gamma_rec);
// } else {
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
// };
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec)/ n_kplus1;
}
T2 = T_image1;
int sign_k = (T2 - T1 > 0.0) ? 1 : -1; // usually -1 , ie negative rate of change, net ionization
// Torigmove = T2 - T1; -- no, it's T_image-T_k that we wanna use.
if (TEST_IONIZE) printf("iVertex %d original move T2 %1.9E T1 %1.9E \n", iVertex, T2, T1);
// X
// it's ok to use sign_k for the sign of the T_use move
// because if it's different sign to fwd move we never do overshooting test
// But what about if it's T<0? so fwd is recombining but new shift of T
// brings T_k+1<0.
// In that case we should be detecting it right here.
// First check if fwd next temperature would be negative:
bool bAccept = false;
// Try allowing to access the b_test loop:
bool b_test = b_useTuse;
// check that this brings back the 77 - it doesn't
if ((T2 < 0.0) && (b_useTuse) && (T_use > T_k.Te))
{
// in this case we should switch to T1 = T_k:
f64 T1 = T_k.Te; // first go. = Tk if b_useTuse == false.
{
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
T2 = T_image1;
sign_k = (T2 - T1 > 0.0) ? 1 : -1;
if (TEST_IONIZE) printf("iVertex %d switch to Tk \n", iVertex);
// X
// and turn off tests below involving assn of move?
b_test = false;
}
// DEBUG 2 -- it worked with this bit and b_test cut out
if (T2 < 0.0) {
while (T2 < 0.0) {
oldT1 = T1;
T1 *= 0.5;
T_oldimage1 = T_image1; // save
// Compute image of T1:
{
Gamma_ion = fac_uplift*GetIonizationRates(T1, w, &Gamma_rec);
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
T_image1 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
T2 = T_image1;
if (TEST_IONIZE) printf("iVertex %d T<0 loop: T2 %1.9E \n", iVertex, T2);
// X
};
// 3(a) If 2^-i Tk is an acceptable point, accept it.
if (T_image1 - T1 > 0.0) // T is now rising -- we crossed T_bwd from T_k
{ // Note: bwd criterion: T_image(use T_use from T_k) element (0, fwd image of Tk)
// 3.(c) If 2^-i T_k is lower than a bwd step, proceed to main loop with
// 2^-i T_k as T_far and 2^-(i-1) T_k as T_near:
T2 = T1; // "left point" (right if we were ascending)
T1 *= 2.0; // "right point" -- may be T_k itself
bAccept = false;
T_image1 = T_oldimage1;
T_image2 = T_image1;
if (TEST_IONIZE) printf("iVertex %d T now rising: T1 %1.9E T2 %1.9E \n", iVertex, T1, T2);
// X
} else {
// Test T1 for overshooting:
// T2 is already defined as image of T1
Gamma_ion = fac_uplift*GetIonizationRates(T2, w, &Gamma_rec);
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
// compare this inflated difference of Tnext from T2
// with the difference T2-Tk :
if (Tkplus2minus1 < 0.0)// same sign as move from Tk to T2
{
bAccept = true; // accept this move Delta(T1)
} else {
// Test that the reversed magnitude is smaller.
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te) + LEEWAY);
};
if (bAccept == false) {
// Overshooting:
if (TEST_IONIZE) printf("iVertex %d T overshooting 1\n", iVertex);
// Y
// No adjustment to T1, T2 needed.
// Compute image of T2 under f_k:
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
T_image2 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
// We want this defined when we enter secant loop.
};
};
} else { // fwd T, or first step with T_use, didn't go below zero
// 4. If we are so close to equilibrium that the proposed
// change in temperature is tiny like 10^{-9}T then just set
// the actual ionization to 0. We have only 10^{-6}/10^{-13}=10^{7} steps.
// Changed factor to 1.0e-10
if (fabs(T_image1 - T_k.Te) < 1.0e-10*T_k.Te) {
// do nothing for a tiny move:
Delta_ionise = 0.0;
Delta_rec = 0.0;
bAccept = true;
if (TEST_IONIZE) printf("iVertex %d small move accepted\n", iVertex);
// Y
bZero_out = true;
// To move 10% would take 1e8 moves, we have only 1e-6/1e-13 = 1e7.
} else {
if (TEST_IONIZE) printf("%d b_test %d \n",iVertex, (b_test ? 1 : 0));
// Y
if (b_test) {
// in this case we now want to check whether our move
// is the same sign as the T_k move.
// We can check the sign_k just by evaluating ionization rates at T_k
// No, we pretty much need to work out which one is winning out.
Gamma_ion = fac_uplift*GetIonizationRates(T_k.Te, w, &Gamma_rec);
f64 Delta_ionise_k = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
f64 Delta_rec_k = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise_k - Delta_rec_k; // Delta_rec is amount recombining.
f64 T_image_k = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise_k + TWOTHIRDS*13.6*kB*Delta_rec_k) / n_kplus1;
if (TEST_IONIZE) printf("iVertex %d bTest was true; T_image_k %1.9E T_k %1.9E T_image1 %1.9E T1 %1.9E\n",
iVertex, T_image_k, T_k.Te, T_image1, T1);
// Y
if (((T_image_k > T_k.Te) && (T_image1 < T1))
||
((T_image_k < T_k.Te) && (T_image1 > T1)))
{
// different sign:
// If it's different sign, either accept it if the move
// brings us the f(Tk) side of Tk, or set it to 0
// if the move would take us the opposite direction from Tk.
bAccept = true;
// We have not changed Delta_ionize
if (((T_image_k > T_k.Te) && (T_image1 < T_k.Te))
||
((T_image_k < T_k.Te) && (T_image1 > T_k.Te)))
{
Delta_ionise = 0.0;
Delta_rec = 0.0;
// A better solution may exist.
if (TEST_IONIZE) printf("iVertex %d setted Delta_ionise to 0\n", iVertex);
// Z
};
} else {
// If it's the same sign, pass to the following code
// which asks if it is overshooting.
// We have not changed T2 or T1 or T_image1
if (TEST_IONIZE) printf("iVertex %d pass to secant loop\n", iVertex);
// Z
};
};
// putative Fwd Euler move neither had T < 0 nor was tiny.
// Overshooting test for Fwd Euler:
if (TEST_IONIZE) printf("iVertex %d overshooting test for Fwd move\n", iVertex);
// Z
bAccept = false;
Gamma_ion = fac_uplift*GetIonizationRates(T2, w, &Gamma_rec);
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
// Comparator:
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
// compare this inflated difference of Tnext from T2 with the difference T2-T1:
if (((Tkplus2minus1 > 0.0) && (sign_k > 0))
||
((Tkplus2minus1 < 0.0) && (sign_k < 0)))
{
bAccept = true; // Accept forward Euler move; Delta was set.
// Or on main step, accept "T_k+1/2" move
if (TEST_IONIZE) printf("iVertex %d comparator same sign; accept\n",iVertex);
// Z
} else {
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te)+LEEWAY);
// Accept only if the comparator is smaller in magnitude.
if (TEST_IONIZE) printf("iVertex %d comparison %1.10E vs %1.10E\n", iVertex,
SAFETY_FACTOR*fabs(Tkplus2minus1) , fabs(T2 - T_k.Te) + LEEWAY);
// Z
};
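// (Comparator logic: Tkplus2minus1 estimates the following step's change in Te,
// taken forward from T2 with the k+1 densities. If that change continues in the
// same direction as the requested move (sign_k), equilibrium has not been crossed
// and the move is safe; if it reverses, accept only when the reversal, scaled by
// SAFETY_FACTOR, is smaller than the step just taken plus LEEWAY, i.e. any
// oscillation about equilibrium is damped.)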
// got rid of probs by commenting from here
if (bAccept == false) {
// construct f_k image of T2 for use in secant:
f64 hnGamma_ion = h_use*Gamma_ion*n_k;
f64 hnnGamma_rec = h_use*Gamma_rec*n_k*n_k;
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
T_image2 = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
}
}; // whether small move
}; // whether fwd T < 0
// Main loop:
int ctr = 0;
while ((bAccept == false) && (ctr < 100)){
++ctr;
// max 100 iterations, but I don't see any reason the hard limit will be needed.
// Calculate secant from existing points:
// We have T1, T2 coming in
// T1 is the one closer to T_k, T2 is the other side of bwd T
f64 T_sec = (T2*T_image1 - T1*T_image2) / (T2 - T_image2 - T1 + T_image1);
// This approximates a backward step.
// Try 'midpoint': we want to be on fwd side of bwd step
f64 T_est = 0.5*(T_sec + T1);
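// (T_sec is the fixed point of the straight line through (T1, T_image1) and
// (T2, T_image2): setting T = T_image1 + (T - T1)*(T_image2 - T_image1)/(T2 - T1)
// and solving for T gives T = (T2*T_image1 - T1*T_image2)/(T2 - T1 - T_image2 + T_image1),
// which is the expression above. Averaging with T1 keeps the estimate on the
// forward side of the backward step, per the comment above.)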
// Calculate image starting from T_k and using T_est
//if (TEST_IONIZE) {
//Gamma_ion = GetIonizationRatesDebug(T_est, w, &Gamma_rec);
//} else {
Gamma_ion = fac_uplift*GetIonizationRates(T_est, w, &Gamma_rec);
//};
Delta_ionise = (n_n_k*hn*Gamma_ion + (n_n_k + n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
Delta_rec = (n_k*hnn*Gamma_rec + (n_k + n_n_k)*hnn*Gamma_rec*hn*Gamma_ion) /
((1.0 + hn*Gamma_ion)*(1.0 + hnn*Gamma_rec) - hnn*Gamma_rec*hn*Gamma_ion);
// *** Set for the move we are testing ***
n_kplus1 = n_k + Delta_ionise - Delta_rec; // Delta_rec is amount recombining.
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
f64 T_image_est = (n_k*T_k.Te + coeff_on_ionizing*Delta_ionise + TWOTHIRDS*13.6*kB*Delta_rec) / n_kplus1;
if (TEST_IONIZE) printf("iVertex %d secant loop T1 %1.9E T2 %1.9E T_est %1.9E T_image_est %1.9E Gamma_ion %1.9E Gamma_rec %1.9E\n",
iVertex, T1, T2, T_est, T_image_est, Gamma_ion, Gamma_rec);
// A - worked with
bAccept = false;
if ((T_image_est < 0.0) && (sign_k < 0)) // If T goes negative it does count as overshooting, supposing original dT was decreasing.
{
// Can only get here if non-monotonic?
// overshooting:
T1 = T_est;
T_image1 = T_image_est;
bAccept = false;
} else {
// between bwd & fwd: T_image_est - T_est between 0 and Torigmove.
if (
((sign_k > 0) && (T_image_est - T_est < 0.0))
|| // going wrong way starting from T_k: therefore beyond bwd
((sign_k < 0) && (T_image_est - T_est > 0.0))
)
{
T2 = T_est;
T_image2 = T_image_est;
bAccept = false;
if (TEST_IONIZE) printf("iVertex %d : T_est beyond bwd\n", iVertex);
// A - works with
// This is beyond bwd but do we need to check that we are not exceeding a fwd.
// .. If we got here then the fwd step is overshooting, so a greater move would presumably be overshooting also.
// We now defined sign_k based on the requested T's move
// Therefore can we say that
// 1. T2 is on the opposite side of T_use from T_k
// 2. Moving back towards T_use will achieve the same sign as T_use
// ... it is possible that we were given T_use the other side of bwd so then
// we are stuck? can it be the other side of bwd, overshoot T_k away from itself ---
// we need to address separate cases.
} else {
// Test overshooting:
bAccept = false;
if (TEST_IONIZE) printf("iVertex %d : T_est overshooting test\n", iVertex);
// A - works with
// T_image_est is > 0 if we got here.
Gamma_ion = fac_uplift*GetIonizationRates(T_image_est, w, &Gamma_rec);
n_kplus2 = n_kplus1 + h_use*n_n_kplus1*n_kplus1*Gamma_ion -
h_use*n_kplus1*n_kplus1*n_kplus1*Gamma_rec;
// Comparator:
Tkplus2minus1 = (n_kplus1 / n_kplus2 - 1.0)*T2 +
coeff_on_ionizing*h_use*n_n_kplus1*Gamma_ion +
TWOTHIRDS*13.6*kB*h_use*n_kplus1*n_kplus1*Gamma_rec;
// compare this inflated difference of Tnext from T2 with the difference T2-T1:
if (((Tkplus2minus1 > 0.0) && (T2-T_k.Te > 0.0))
||
((Tkplus2minus1 < 0.0) && (T2-T_k.Te < 0.0)))
{
bAccept = true; // same sign onward => not overshooting eqm
if (TEST_IONIZE) printf("iVertex %d comparator same sign\n", iVertex);
// A
} else {
bAccept = (SAFETY_FACTOR*fabs(Tkplus2minus1) < fabs(T2-T_k.Te)+LEEWAY);
if (TEST_IONIZE) printf("iVertex %d comparison %1.10E vs %1.10E\n",
SAFETY_FACTOR*fabs(Tkplus2minus1), fabs(T2 - T_k.Te) + LEEWAY);
// A
// Accept only if the comparator is smaller in magnitude.
if (bAccept == false) {
T1 = T_est; // still overshooting
T_image1 = T_image_est;
};
};
};
};
}; // end while: exits when bAccept == true or ctr reaches 100
// Now calculate what to do, given this move:
// ==========================================
// Aim in the above:
// Delta_ionise and delta_rec should be already set.
// This bit is not perfect. It breaks down the changes into steps.
// 0. What is ROC of N:
if (bZero_out) {
// do nothing to MAR_elec, ourrates etc
} else {
f64 dNdt_ionise = AreaMajor*Delta_ionise / h_use;
f64 dNdt_recombine = AreaMajor*Delta_rec / h_use;
ourrates.N += dNdt_ionise - dNdt_recombine;
ourrates.Nn += dNdt_recombine - dNdt_ionise;
if (TEST_IONIZE) printf("Delta_ionise %1.10E Delta_rec %1.10E ourrates.N %1.10E \n",
Delta_ionise, Delta_rec, ourrates.N);
// Store existing energy density:
// f64 Energy_k = 1.5*(n_k*(T_k.Te + T_k.Ti) + n_n_k*T_k.Tn) +
// 0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n));
// 1. Calculate kinetic energy absorption impact on vez, vnz
// ie Ionization resistance to current
n_kplus1 = n_k + Delta_ionise - Delta_rec;
n_n_kplus1 = n_n_k - Delta_ionise + Delta_rec;
// Absorbed DKE:
f64 deltaKE = -(2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv))*Delta_ionise*13.6*kB;
f64 safe_argument = (m_e*n_kplus1 + m_n*n_n_kplus1)*
((n_k*n_n_k / (m_e*n_k + m_n*n_n_k))*(w0z*w0z) + 2.0*deltaKE / (m_e*m_n)) /
(n_kplus1*n_n_kplus1);
f64 new_vz_diff;
if (safe_argument >= 0.0) {
new_vz_diff = sqrt(safe_argument);
} else {
new_vz_diff = 0.0; // This is just to stop it crashing; I have not revisited the theory of why the sqrt argument should be +ve.
};
// I guess it can be -ve.
// Choose new_vz_diff to have same sign as w0z = diff_k:
if (w0z < 0.0) new_vz_diff = -new_vz_diff;
f64 delta_vez = m_n*n_n_kplus1*(-w0z + new_vz_diff) /
(m_n*n_n_kplus1 + m_e*n_kplus1);
f64 delta_vnz = -m_e*n_kplus1*delta_vez / (m_n*n_n_kplus1);
if (TEST_IONIZE) printf("deltaKE %1.10E v.vez %1.10E w0z %1.9E new_vz_diff %1.9E delta_vez %1.9E\n",
deltaKE, v.vez, w0z, new_vz_diff, delta_vez);
// Check: w0 = vez-vnz - tick
// should change to scalar.
MAR_neut.z += AreaMajor*n_n_kplus1*delta_vnz / h_use;
MAR_elec.z += AreaMajor*n_kplus1*delta_vez / h_use;
f64_vec3 ve_kplus1, vi_kplus1, vn_kplus1;
f64_vec3 v_use;
v_use.x = v.vxy.x;
v_use.y = v.vxy.y;
v_use.z = (m_e*v.vez + m_i*v.viz) / (m_e + m_i);
// Store alongside: v_k+1 so that we can follow the anticipated change in energy,
// to create energy balance:
ve_kplus1.x = v.vxy.x*(n_k / n_kplus1);
ve_kplus1.y = v.vxy.y*(n_k / n_kplus1);
ve_kplus1.z = v.vez*(n_k / n_kplus1) + delta_vez; // we need to store v, we could also store nv if we wanted.
vi_kplus1.x = v.vxy.x*(n_k / n_kplus1);
vi_kplus1.y = v.vxy.y*(n_k / n_kplus1);
vi_kplus1.z = v.viz*(n_k / n_kplus1);
vn_kplus1 = v_n*(n_n_k / n_n_kplus1); // Check in accel routine to be sure this will ever actually happen
vn_kplus1.z += delta_vnz;
// Does it happen automatically or do we need to include the n_k+1/n_k effect in MAR_ ????
// It does NOT work automatically !!!
// We have to include the effect here ---- stupid us, don't know why doing it this way.
// Where used: v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
// and n_use is the target n_kplus1.
// MAR_neut += AreaMajor*n_n_kplus1*(v_n*(n_n_k/ n_n_kplus1) - v_n) / h_use;
MAR_neut += AreaMajor*(v_n*(n_n_k - n_n_kplus1)) / h_use;
MAR_ion += AreaMajor*(Make3(v.vxy,v.viz)*(n_k - n_kplus1)) / h_use;
MAR_elec += AreaMajor*(Make3(v.vxy,v.vez)*(n_k - n_kplus1)) / h_use;
// diluting v..
// 2. Add the effect of xfers on momenta:
// Let's think about this clearly:
// v_k+1 = (1/n_k+1) (n_k v_k + delta_ionize v_n - delta_rec v_use)
// = v_k + h * MAR / (n_k+1 * Area);
// MAR(h/area) = (v_k+1 - v_k)(n_k+1)
// = ( n_k v_k + delta_ionize v_n - delta_rec v_use - v_k n_k+1)
// We split the change into local minor cells, hopefully correctly.
vn_kplus1 -= (Delta_ionise*v_n - Delta_rec*v_use) / n_n_kplus1;
// n_k+1 v_k+1 = n_k v_k + Delta_n*v_use => v_k+1 = (n_k/n_k+1) v_k + (Delta_n/n_k+1) v_use
vi_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
ve_kplus1 += (Delta_ionise*v_n - Delta_rec*v_use) / n_kplus1;
MAR_neut += -dNdt_ionise*v_n + dNdt_recombine*v_use; // area*delta_n/h * v_use
MAR_ion += dNdt_ionise*v_n - dNdt_recombine*v_use;
MAR_elec += dNdt_ionise*v_n - dNdt_recombine*v_use;
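// (Check: combining these xfer increments with the dilution increments above,
// the total added to MAR_ion satisfies MAR*(h/Area) = v_k*(n_k - n_kplus1)
// + Delta_ionise*v_n - Delta_rec*v_use = (vi_kplus1 - v_k)*n_kplus1, which is
// what the accel routine expects when it forms v0 = v_k + h*MAR/(n_kplus1*AreaMinor);
// likewise for MAR_elec (plus the delta_vez friction term added earlier) and MAR_neut.)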
if (TEST_IONIZE) printf("__ ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E delta_vez %1.9E\n",
ve_kplus1.x, ve_kplus1.y, ve_kplus1.z, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
vn_kplus1.x, vn_kplus1.y, vn_kplus1.z, delta_vez);
// . Ionization cooling & recombination heating
//f64 coeff_on_ionizing = 0.5*T_k.Tn - 2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv);
// ourrates.NeTe +=
// dNdt_recombine*2.0*13.6*kB / 3.0
// - (2.0*T_k.Te*13.6*kB*n_k / (3.0*T_k.Te*n_k + 2.0*Theta*Kconv))*dNdt_ionise;
// We can drop this: it will be accounted for by the final energy balance.
// 3. Add to nT for x-fers due to species converting
// ourrates.NiTi += 0.5*dNdt_ionise*T_k.Tn;
// ourrates.NeTe += 0.5*dNdt_ionise*T_k.Tn;
// ourrates.NnTn -= dNdt_ionise*T_k.Tn; // no longer need this, it is incorporated below
// f64 nTe_kplus1 = T_k.Te*(n_k)+0.5*Delta_ionise*T_k.Tn;
// f64 nTi_kplus1 = T_k.Ti*(n_k)+0.5*Delta_ionise*T_k.Tn;
// f64 n_nTn_kplus1 = T_k.Tn*(n_n_k)-Delta_ionise*T_k.Tn;
// without any change to NeTe it would stay just the same as at timeslice k.
// 4. Energy balance through Te:
// Maybe we should rather be seeking OVERALL energy balance where KE_result is from n_k+1, v_k+1
// and we ensure that we have lost the right amount of energy overall.
// That is the better way:
// f64 KE_result = 0.5*(m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + m_i*n_kplus1*vi_kplus1.dot(vi_kplus1)
// + m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1));
// if (TEST_IONIZE) printf("n_kplus1 %1.12E ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E\n",
// n_kplus1, ve_kplus1.x, ve_kplus1.y, ve_kplus1.z, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
// vn_kplus1.x, vn_kplus1.y, vn_kplus1.z);
f64 Energy_density_k_n = 0.5*m_n*n_n_k*v_n.dot(v_n) + 1.5*n_n_k*T_k.Tn;
f64 Energy_density_k_i = 0.5*m_i*n_k*(v.vxy.dot(v.vxy) + v.viz*v.viz) + 1.5*n_k*T_k.Ti;
f64 Energy_density_k_e = 0.5*m_e*n_k*(v.vxy.dot(v.vxy) + v.vez*v.vez) + 1.5*n_k*T_k.Te;
// The energy density given the change in velocity but zero change in heat:
f64 Energy_density_kplus1_e = 0.5*m_e*n_kplus1*ve_kplus1.dot(ve_kplus1) + 1.5*T_k.Te*n_k;
// without any change to NeTe it would stay just the same as at timeslice k.
f64 Energy_density_kplus1_i = 0.5*m_i*n_kplus1*vi_kplus1.dot(vi_kplus1) + 1.5*T_k.Ti*n_k;
f64 Energy_density_kplus1_n = 0.5*m_n*n_n_kplus1*vn_kplus1.dot(vn_kplus1) + 1.5*T_k.Tn*n_n_k;
f64 Energy_density_target_n = Energy_density_k_n + 0.5*m_n*Delta_rec*v_use.dot(v_use) + 1.5*Delta_rec*(T_k.Ti + T_k.Te)
- 0.5*m_n*Delta_ionise*v_n.dot(v_n) - 1.5*Delta_ionise*T_k.Tn;
f64 Energy_density_target_i = Energy_density_k_i + 0.5*m_i*Delta_ionise*v_n.dot(v_n) + 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn
- 0.5*m_i*Delta_rec*v_use.dot(v_use) - 1.5*Delta_rec*T_k.Ti;
f64 Energy_density_target_e = Energy_density_k_e + 0.5*m_e*Delta_ionise*v_n.dot(v_n) + 1.5*(m_e / m_n)*Delta_ionise*T_k.Tn
- 0.5*m_e*Delta_rec*v_use.dot(v_use) - 1.5*Delta_rec*T_k.Te
- 13.6*kB*(Delta_ionise - Delta_rec);
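// (Check on the targets, assuming m_n = m_i + m_e: summing the three target
// densities, the 0.5*m*v^2 transfer terms and the 1.5*T transfer terms cancel
// between species, leaving
//   target_n + target_i + target_e = E_k_n + E_k_i + E_k_e - 13.6*kB*(Delta_ionise - Delta_rec),
// i.e. total energy changes only by the ionization potential; the NT corrections
// below then impose this against the k+1 kinetic energies.)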
// f64 Energy_density_target = Energy_k - 13.6*kB*(Delta_ionise - Delta_rec);
// Additional_heat = (KE_k + deltaKE) - KE_result; // usually positive
// 1*1+3*3 > 2*2 + 2*2 so KE is generally decreasing by friction; KE_result < KE_k+deltaKE
// KE_result + Added_heat + existing heat = desired total energy = KE_k + heat_k + deltaKE
// 1.5 nT += Frictional_heating
// NTe += (2/3) Area Frictional_heating
if (TEST_IONIZE) printf("AreaMajor %1.9E h_use %1.9E 0.6666 13.6 kB %1.9E \n"
"Energy_density_kplus1_i %1.12E target_i %1.12E k_i %1.12E \n"
"vi_k %1.10E %1.10E %1.10E kplus1 %1.10E %1.10E %1.10E\n"
"0.5*m_i*Delta_ionise*v_n.dot(v_n) %1.9E 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn %1.9E\n"
"-0.5*m_i*Delta_rec*v_use.dot(v_use) %1.9E -1.5*Delta_rec*T_k.Ti %1.9E\n",
AreaMajor, h_use, 0.666667*13.6*kB,
Energy_density_kplus1_i, Energy_density_target_i, Energy_density_k_i,
v.vxy.x, v.vxy.y, v.viz, vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
0.5*m_i*Delta_ionise*v_n.dot(v_n), 1.5*(m_i / m_n)*Delta_ionise*T_k.Tn,
-0.5*m_i*Delta_rec*v_use.dot(v_use) , -1.5*Delta_rec*T_k.Ti
);
if (TEST_IONIZE) printf("ourrates.NiTi before: %1.11E \n", ourrates.NiTi);
ourrates.NeTe += 2.0*AreaMajor*
(Energy_density_target_e - Energy_density_kplus1_e) / (3.0*h_use);
ourrates.NiTi += 2.0*AreaMajor*
(Energy_density_target_i - Energy_density_kplus1_i) / (3.0*h_use);
ourrates.NnTn += 2.0*AreaMajor*
(Energy_density_target_n - Energy_density_kplus1_n) / (3.0*h_use);
if (TEST_IONIZE) printf("ourrates.NiTi after: %1.11E Area*n_kplus1 %1.9E \n\n", ourrates.NiTi,
AreaMajor*n_kplus1);
//
// if ((Energy_density_target_e - Energy_density_kplus1_e) / (n_k*AreaMajor) > 1.0e-8)
// {
// printf("Vertex %d vez(k+1) %1.9E vezk %1.9E delta_vez %1.9E\n"
// "iVertex %d n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E h*NeTe %1.9E \n"
// "Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
// "Delta_ionise %1.9E rec %1.9E deltaKE %1.9E deltavez %1.9E\n"
// "Predicted Te %1.12E Theta %1.12E \n"
// "Energy_k %1.12E w0z %1.9E energy_kplus1 %1.12E energy_target %1.12E \n"
// "KEk %1.12E Heat_k %1.12E \n",
// iVertex,
// ve_kplus1.z, v.vez, delta_vez,
// iVertex, n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
// T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
// Delta_ionise, Delta_rec, deltaKE, delta_vez,
// (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use) / (n_k*AreaMajor),
// Theta, Energy_k, w0z, Energy_density_kplus1_e, Energy_density_target_e,
// 0.5*((m_e + m_i)*n_k*(v.vxy.dot(v.vxy)) + m_e*n_k*v.vez*v.vez + m_i*n_k*v.viz*v.viz + m_n*n_n_k*v_n.dot(v_n)),
// 1.5*(n_k*T_k.Te + n_k*T_k.Ti + n_n_k*T_k.Tn));
// printf("iVertex %d "
// "ve_kplus1 %1.9E %1.9E %1.9E vi_plus1 %1.9E %1.9E %1.9E vn_plus1 %1.9E %1.9E %1.9E\n"
// "v_n %1.9E %1.9E %1.9E n_n_k %1.9E n_n_kplus1 %1.9E n_k %1.9E n_kplus1 %1.9E \n"
// "Theta %1.10E Kconv %1.10E deltaKE %1.10E ppnK %1.10E full_loss %1.10E\n",
// iVertex,
// ve_kplus1.x, ve_kplus1.y, ve_kplus1.z,
// vi_kplus1.x, vi_kplus1.y, vi_kplus1.z,
// vn_kplus1.x, vn_kplus1.y, vn_kplus1.z,
// v_n.x, v_n.y, v_n.z, n_n_k, n_n_kplus1, n_k, n_kplus1,
// Theta, Kconv, deltaKE,
// (2.0*Theta*Kconv / (3.0*n_k*T_k.Te + 2.0*Theta*Kconv)),Delta_ionise*13.6*kB
// );
// }
// DEBUG:
if (TEST_IONIZE) printf("iVertex %d n_k %1.9E n_n_k %1.9E N_k %1.9E Te_k %1.9E NeTe %1.9E \n h*NeTe %1.9E "
"Ti_k %1.9E h*NiTi %1.9E Tn_k %1.9E h*NnTn %1.9E \n"
"Delta_ionise %1.9E rec %1.9E \n",
iVertex, n_k, n_n_k, n_k*AreaMajor, T_k.Te, ourrates.NeTe, h_use*ourrates.NeTe,
T_k.Ti, h_use*ourrates.NiTi, T_k.Tn, h_use*ourrates.NnTn,
Delta_ionise, Delta_rec
);
// DEBUG:
if (TEST_IONIZE) //n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use < 0.0)
printf("%d Predicted Te %1.9E \n", iVertex, (n_k*AreaMajor*T_k.Te + ourrates.NeTe*h_use) / (n_k*AreaMajor));
// Try to get rid of 77
}
memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
memcpy(p_MAR_neut + iVertex, &MAR_neut, sizeof(f64_vec3));
memcpy(p_MAR_ion + iVertex, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex, &MAR_elec, sizeof(f64_vec3));
// ****************************************************************************************************
//// f64 TeV = T.Te * one_over_kB;
//// We loaded in ourrates.NT which indicates the new heat available so we should include some of that.
//// The main impact will be from heat conduction; dN/dt due to advection neglected here.
//f64 TeV = one_over_kB * (T.Te*our_n.n*AreaMajor + h_use*ourrates.NeTe)/
// (our_n.n*AreaMajor + h_use*ourrates.N);
//// Should be very careful here: ourrates.NeTe can soak to neutrals on timescale what? 1e-11?
//if (TeV < 0.0) {
// printf("\n\niVertex %d : ourrates.N %1.14E denominator %1.14E \n"
// " AreaMajor %1.14E TeV %1.14E ourrates.NeTe %1.10E h %1.10E \n"
// "ourrates.Nn %1.10E n %1.10E n_n %1.10E Te %1.10E Tn %1.10E \n\n",
// iVertex, ourrates.N,
// (our_n.n*AreaMajor + h_use*ourrates.N),
// AreaMajor, TeV, ourrates.NeTe, h_use,
// ourrates.Nn, our_n.n, our_n.n_n, T.Te, T.Tn);
//
//}
//f64 sqrtT = sqrt(TeV);
//f64 temp = 1.0e-5*exp(-13.6 / TeV) / (13.6*(6.0*13.6 + TeV)); // = S / T^1/2
// // Let h n n_n S be the ionising amount,
// // h n S is the proportion of neutrals! Make sure we do not run out!
////f64 hnS = (h_use*our_n.n*TeV*temp) / (sqrtT + h_use * our_n.n_n*temp*SIXTH*13.6);
// // d/dt (sqrtT) = 1/2 dT/dt T^-1/2.
// // dT[eV]/dt = -TWOTHIRDS * 13.6* n_n* sqrtT *temp
// // d/dt (sqrtT) = -THIRD*13.6*n_n*temp;
//// kind of midpoint, see SIXTH not THIRD:
//f64 Model_of_T_to_half = TeV / (sqrtT + h_use*SIXTH*13.6*our_n.n_n*temp / (1.0 - h_use*(our_n.n_n - our_n.n)*temp*sqrtT));
//f64 hS = h_use*temp*Model_of_T_to_half;
//
//// NEW:
//f64 ionise_rate = AreaMajor * our_n.n_n * our_n.n*hS /
// (h_use*(1.0 + hS*(our_n.n-our_n.n_n))); // dN/dt
//ourrates.N += ionise_rate;
//ourrates.Nn += -ionise_rate;
//// Let nR be the recombining amount, R is the proportion.
//TeV = T.Te * one_over_kB;
//f64 Ttothe5point5 = sqrtT * TeV * TeV*TeV * TeV*TeV;
//f64 hR = h_use * (our_n.n * our_n.n*8.75e-27*TeV) /
// (Ttothe5point5 + h_use * 2.25*TWOTHIRDS*13.6*our_n.n*our_n.n*8.75e-27);
//// T/T^5.5 = T^-4.5
//// T/(T^5.5+eps) < T^-4.5
//// For some reason I picked 2.25 = 4.5/2 instead of 5.5/2.
//// But basically it looks reasonable.
//// Maybe the additional stuff is an estimate of the change in T[eV]^5.5??
//// d/dt T^5.5 = 5.5 T^4.5 dT/dt
//// dT/dt = TWOTHIRDS * 13.6*( hR / h_use) = TWOTHIRDS * 13.6*( n^2 8.75e-27 T^-4.5)
//// d/dt T^5.5 = 5.5 TWOTHIRDS * 13.6*( n^2 8.75e-27 )
//f64 recomb_rate = AreaMajor * our_n.n * hR / h_use; // could reasonably again take hR/(1+hR) for n_k+1
//ourrates.N -= recomb_rate;
//ourrates.Nn += recomb_rate;
//if (TEST) printf("%d recomb rate %1.10E ionise_rate %1.10E our_n.n %1.10E nn %1.10E hR %1.10E hS %1.10E\n"
// "h_use %1.8E sqrtTeV %1.10E Ttothe5point5 %1.9E Te %1.9E modelThalf %1.9E\n", iVertex,
// recomb_rate, ionise_rate, our_n.n, our_n.n_n, hR, hS, h_use, sqrtT, Ttothe5point5, T.Te, Model_of_T_to_half);
//ourrates.NeTe += -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate;
//ourrates.NiTi += 0.5*T.Tn*ionise_rate;
//ourrates.NnTn += (T.Te + T.Ti)*recomb_rate;
//if (TEST) {
// printf("kernelIonisation %d NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "due to I+R : NeTe %1.12E NiTi %1.12E NnTn %1.12E\n"
// "d/dtNeTe/N %1.9E d/dtNiTi/N %1.9E d/dtNnTn/Nn %1.9E \n\n",
// iVertex, ourrates.NeTe, ourrates.NiTi, ourrates.NnTn,
// -TWOTHIRDS * 13.6*kB*(ionise_rate - recomb_rate) + 0.5*T.Tn*ionise_rate,
// 0.5*T.Tn*ionise_rate,
// (T.Te + T.Ti)*recomb_rate,
// ourrates.NeTe / (our_n.n*AreaMajor), ourrates.NiTi / (our_n.n*AreaMajor), ourrates.NnTn / (our_n.n_n*AreaMajor));
//};
//memcpy(NTadditionrates + iVertex, &ourrates, sizeof(NTrates));
};
}
__global__ void kernelAdvanceDensityAndTemperature(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
// Think we see the mistake here: are these to be major or minor values?
// Major, right? Check code:
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_div_v_neut,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_Integrated_div_v_overall,
f64 * __restrict__ p_AreaMajor, // hmm
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
if (TEST)
printf("Advance_nT %d : nsrc %1.12E nn %1.12E *AreaMajor %1.12E %1.12E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.10E \n"
"h*additionNiTi %1.12E for e %1.12E for n %1.12E \n"
"AdditionNT.e %1.10E h_use %1.10E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x],
newdata.NiTi, newdata.NeTe, newdata.NnTn,
AdditionNT.NeTe, h_use);
}
// So at this vertex, near the insulator, NiTi that comes in is NaN. Is that advection or diffusion?
// Have to go to bed tonight...
{
nvals n_dest;
f64 Div_v_overall_integrated = p_Integrated_div_v_overall[iVertex];
n_dest.n = newdata.N / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // Do have to worry whether advection steps are too frequent.
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // What could do differently: know ROC area as well as mass flux through walls
p_n_major_dest[iVertex] = n_dest;
// if (iVertex == CHOSEN) printf("GPU %d n_dest.n_n %1.14E Area_used %1.14E \n\n", iVertex, n_dest.n_n,
// (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated));
}
// roughly right ; maybe there are improvements.
// --------------------------------------------------------------------------------------------
// Simple way of doing area ratio for exponential growth of T:
// (1/(1+h div v)) -- v outward grows the area so must be + here.
// Compressive heating:
// USE 1 iteration of Halley's method for cube root:
// cu_root Q =~~= x0(x0^3+2Q)/(2x0^3+Q) .. for us x0 = 1, Q is (1+eps)^-2
// Thus (1+2(1+eps)^-2)/(2+(1+eps)^-2)
// Multiply through by (1+eps)^2:
// ((1+eps)^2+2)/(1+2*(1+eps)^2) .. well of course it is
// eps = h div v
// Way to get reasonable answer without re-doing equations:
// Take power -1/3 and multiply once before interspecies and once after.
f64 factor, factor_neut; // used again at end
{
f64 Div_v = p_div_v[iVertex];
f64 Div_v_n = p_div_v_neut[iVertex];
factor = (3.0 + h_use * Div_v) /
(3.0 + 2.0* h_use * Div_v);
factor_neut = (3.0 + h_use * Div_v_n) /
(3.0 + 2.0*h_use * Div_v_n);
}
// gives (1+ h div v)^(-1/3), roughly
// Alternate version:
// factor = pow(pVertex->AreaCell / pVertDest->AreaCell, 2.0 / 3.0);
// pVertDest->Ion.heat = pVertex->Ion.heat*factor;
// but the actual law is with 5/3
// Comp htg dT/dt = -2/3 T div v_fluid
// factor (1/(1+h div v))^(2/3) --> that's same
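// (Quick check of the one-Halley-step cube root: writing eps = h*Div_v, the factor
// (3 + eps)/(3 + 2*eps) and the exact (1 + eps)^(-1/3) both expand as
// 1 - eps/3 + 2*eps*eps/9 + O(eps^3); e.g. at eps = 0.01 they are 0.99668874 and
// 0.99668872 respectively, so the approximation is ample for small h*div v.)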
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn*factor_neut;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti*factor;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te*factor;
//
if (TEST) {
printf("\nAdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdate.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n",
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te);
}
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei; // optimize after
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
//
// if (iVertex == CHOSEN) {
// printf("nu_en_MT components GPU : %1.8E %1.8E %1.8E \n",
// s_en_MT, n_src_or_use[threadIdx.x].n_n, electron_thermal);
// f64 T = T_use.Te*one_over_kB;
// int j;
// printf("T = %1.10E\n", T);
// for (j = 0; j < 10; j++)
// printf("%d : cross_T_vals_d %1.10E cross_s_vals_MT %1.10E \n",
// j, cross_T_vals_d[j], cross_s_vals_MT_ni_d[j]);
// int i = 1;
// if (T > cross_T_vals_d[5]) {
// if (T > cross_T_vals_d[7]) {
// if (T > cross_T_vals_d[8])
// {
// i = 9; // top of interval
// }
// else {
// i = 8;
// };
// }
// else {
// if (T > cross_T_vals_d[6]) {
// i = 7;
// }
// else {
// i = 6;
// };
// };
// }
// else {
// if (T > cross_T_vals_d[3]) {
// if (T > cross_T_vals_d[4]) {
// i = 5;
// }
// else {
// i = 4;
// };
// }
// else {
// if (T > cross_T_vals_d[2]) {
// i = 3;
// }
// else {
// if (T > cross_T_vals_d[1]) {
// i = 2;
// }
// else {
// i = 1;
// };
// };
// };
// };
// // T lies between i-1,i
// printf("i = %d\n\n", i);
// }
nu_ei = nu_eiBarconst * kB_to_3halves*n_src_or_use[threadIdx.x].n*lnLambda /
(T_use.Te*sqrt_Te);
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
if (TEST)
printf("%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E \n areamajor %1.8E\n"
"nu_in %1.10E nu_en %1.8E \n"
"Frictional htg (NT+=): n i e %1.10E %1.10E %1.10E\n",
iVertex, v_n.z, vie.viz, vie.vez, AreaMajor[threadIdx.x],
nu_in_MT, nu_en_MT,
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz))),
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz))),
h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz))
);
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_ei;
LHS.xx = 1.0 - h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -h_use * (M_in * nu_in_MT);
LHS.xz = -h_use *(M_en * nu_en_MT);
LHS.yx = -h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -h_use * M_ei * nu_ei;
LHS.zx = -h_use * M_en * nu_ne_MT;
LHS.zy = -h_use * M_ei * nu_ie;
LHS.zz = 1.0 - h_use * (-M_en * nu_en_MT - M_ei * nu_ei);
// some indices appear reversed because NT not T.
if (TEST) {
printf("%d LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n",
iVertex, LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
printf("GPU %d : NnTn %1.14E NeTe %1.14E nu_en_MT %1.12E \n", iVertex, newdata.NnTn, newdata.NeTe, nu_en_MT);
}
LHS.Inverse(inverted);
}
f64_vec3 RHS;
f64 nu_ie = nu_ei;
RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
+ h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
+ h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
+ h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
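// (Scheme note: RHS = (I + h*A)*NT_k and LHS = I - h*A, where A is the
// inter-species exchange matrix whose rows give d(NnTn)/dt, d(NiTi)/dt, d(NeTe)/dt,
// so NT_kplus1 = (I - h*A)^{-1} (I + h*A) NT_k. The columns of A sum to zero, so
// NnTn + NiTi + NeTe is conserved by this exchange; a standalone reference sketch
// of the same 3x3 solve is given after this kernel.)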
T3 T_dest;
T_dest.Tn = newdata.NnTn* factor_neut / newdata.Nn;
T_dest.Ti = newdata.NiTi* factor / newdata.N;
T_dest.Te = newdata.NeTe* factor / newdata.N;
if (TEST) {
printf("\ninverted %d | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n"
"NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n",
iVertex,
inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z,
newdata.NnTn, newdata.NiTi, newdata.NeTe, T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
if (T_dest.Te != T_dest.Te) {
printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
"n %1.10E Area %1.10E hd/dtNT %1.10E\n",
iVertex, factor, newdata.N, info.flag,
n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], h_use * NTadditionrates[iVertex].N);
}
p_T_major_dest[iVertex] = T_dest;
}
else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
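// ---------------------------------------------------------------------------
// Reference sketch (not called anywhere): a standalone, host-side version of the
// 3x3 inter-species heat-exchange solve done above with LHS / RHS / LHS.Inverse.
// Illustrative only: it assumes the same M_* / nu_* coefficient layout as the
// kernel and deliberately uses plain double plus a hand-rolled cofactor inverse
// instead of the project's f64 / f64_tens3 types, so it can be checked
// independently. Species order matches the kernel: 0 = neutral, 1 = ion,
// 2 = electron; the update is NTnew = (I - h*A)^{-1} (I + h*A) NTold.
static void ExchangeSolve3x3_ReferenceSketch(
	double hstep, double Min, double Men, double Mei,
	double nuni, double nuin, double nune, double nuen, double nuei,
	const double NTold[3], double NTnew[3])
{
	// Exchange-rate matrix A; rows are d(NnTn)/dt, d(NiTi)/dt, d(NeTe)/dt:
	double A[3][3] = {
		{ -(Min*nuni + Men*nune),  Min*nuin,                  Men*nuen },
		{  Min*nuni,              -(Min*nuin + Mei*nuei),     Mei*nuei },
		{  Men*nune,               Mei*nuei,                 -(Men*nuen + Mei*nuei) } };
	double LHS[3][3], RHS[3];
	for (int i = 0; i < 3; i++) {
		RHS[i] = NTold[i];
		for (int j = 0; j < 3; j++) {
			LHS[i][j] = ((i == j) ? 1.0 : 0.0) - hstep*A[i][j]; // I - h A
			RHS[i] += hstep*A[i][j] * NTold[j];                 // (I + h A) NTold
		};
	};
	// Invert LHS by cofactors and apply to RHS:
	double det = LHS[0][0] * (LHS[1][1] * LHS[2][2] - LHS[1][2] * LHS[2][1])
		- LHS[0][1] * (LHS[1][0] * LHS[2][2] - LHS[1][2] * LHS[2][0])
		+ LHS[0][2] * (LHS[1][0] * LHS[2][1] - LHS[1][1] * LHS[2][0]);
	double inv[3][3];
	inv[0][0] = (LHS[1][1] * LHS[2][2] - LHS[1][2] * LHS[2][1]) / det;
	inv[0][1] = (LHS[0][2] * LHS[2][1] - LHS[0][1] * LHS[2][2]) / det;
	inv[0][2] = (LHS[0][1] * LHS[1][2] - LHS[0][2] * LHS[1][1]) / det;
	inv[1][0] = (LHS[1][2] * LHS[2][0] - LHS[1][0] * LHS[2][2]) / det;
	inv[1][1] = (LHS[0][0] * LHS[2][2] - LHS[0][2] * LHS[2][0]) / det;
	inv[1][2] = (LHS[0][2] * LHS[1][0] - LHS[0][0] * LHS[1][2]) / det;
	inv[2][0] = (LHS[1][0] * LHS[2][1] - LHS[1][1] * LHS[2][0]) / det;
	inv[2][1] = (LHS[0][1] * LHS[2][0] - LHS[0][0] * LHS[2][1]) / det;
	inv[2][2] = (LHS[0][0] * LHS[1][1] - LHS[0][1] * LHS[1][0]) / det;
	for (int i = 0; i < 3; i++) {
		NTnew[i] = 0.0;
		for (int j = 0; j < 3; j++) NTnew[i] += inv[i][j] * RHS[j];
	};
	// The columns of A sum to zero, so NTnew[0]+NTnew[1]+NTnew[2] equals
	// NTold[0]+NTold[1]+NTold[2]: the exchange only moves heat between species.
}
// ---------------------------------------------------------------------------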
__global__ void kernelAdvanceDensityAndTemperature_nosoak_etc(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_div_v_neut,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_Integrated_div_v_overall,
f64 * __restrict__ p_AreaMajor, // hmm
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
if (TEST1)
printf("Advance_nT NOSOAK %d : nsrc %1.12E nn %1.12E *AreaMajor %1.12E %1.12E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E h_use %1.10E AdditionNT N Nn %1.10E %1.10E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], h_use,
AdditionNT.N, AdditionNT.Nn);
nvals n_dest;
f64 Div_v_overall_integrated = p_Integrated_div_v_overall[iVertex];
n_dest.n = newdata.N / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // Do have to worry whether advection steps are too frequent.
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated); // What could do differently: know ROC area as well as mass flux through walls
p_n_major_dest[iVertex] = n_dest;
if (iVertex == VERTCHOSEN) printf("\n %d n_dest.n_n %1.14E Area_used %1.14E Div_v_overall_integ %1.13E\n\n",
iVertex, n_dest.n_n,
(AreaMajor[threadIdx.x] + h_use*Div_v_overall_integrated),
Div_v_overall_integrated);
}
// roughly right ; maybe there are improvements.
// --------------------------------------------------------------------------------------------
// Simple way of doing area ratio for exponential growth of T:
// (1/(1+h div v)) -- v outward grows the area so must be + here.
// Compressive heating:
// USE 1 iteration of Halley's method for cube root:
// cu_root Q =~~= x0(x0^3+2Q)/(2x0^3+Q) .. for us x0 = 1, Q is (1+eps)^-2
// Thus (1+2(1+eps)^-2)/(2+(1+eps)^-2)
// Multiply through by (1+eps)^2:
// ((1+eps)^2+2)/(1+2*(1+eps)^2) .. well of course it is
// eps = h div v
// Way to get reasonable answer without re-doing equations:
// Take power -1/3 and multiply once before interspecies and once after.
f64 factor, factor_neut; // used again at end
{
f64 Div_v = p_div_v[iVertex];
f64 Div_v_n = p_div_v_neut[iVertex];
factor = (3.0 + h_use * Div_v) /
(3.0 + 2.0* h_use * Div_v);
factor_neut = (3.0 + h_use * Div_v_n) /
(3.0 + 2.0*h_use * Div_v_n);
}
// gives (1+ h div v)^(-1/3), roughly
// Alternate version:
// factor = pow(pVertex->AreaCell / pVertDest->AreaCell, 2.0 / 3.0);
// pVertDest->Ion.heat = pVertex->Ion.heat*factor;
// but the actual law is with 5/3
// Comp htg dT/dt = -2/3 T div v_fluid
// factor (1/(1+h div v))^(2/3) --> that's same
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn*factor_neut;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti*factor;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te*factor;
//
if (TEST) {
printf("\nAdvance_nT NOSOAK %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi (the new Ni Ti) %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn (the new Nn Tn) %1.12E Tn_k %1.12E \n",
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
}
T3 T_dest;
T_dest.Tn = newdata.NnTn* factor_neut / newdata.Nn;
T_dest.Ti = newdata.NiTi* factor / newdata.N;
T_dest.Te = newdata.NeTe* factor / newdata.N;
if (TEST) {
printf("\nAdvance_nT NOSOAK %d : newdata.N %1.9E T_dest.Ti %1.10E Nn %1.9E T_dest.Tn %1.10E\n",
iVertex, newdata.N, T_dest.Ti, newdata.Nn, T_dest.Tn);
}
if (T_dest.Te != T_dest.Te) {
printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
"n %1.10E Area %1.10E hd/dtNT %1.10E\n",
iVertex, factor, newdata.N, info.flag,
n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], h_use * NTadditionrates[iVertex].N);
}
p_T_major_dest[iVertex] = T_dest;
}
else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
__global__ void kernelAdvanceDensityAndTemperature_noadvectioncompression(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest,
f64_vec3 * __restrict__ p_B_major
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
nvals n_dest;
n_dest.n = newdata.N / (AreaMajor[threadIdx.x]);
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x]);
p_n_major_dest[iVertex] = n_dest;
if (TEST)
printf("Bdvance_nT %d : nsrc %1.13E nn %1.13E *AreaMajor %1.13E %1.13E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E n_n_k+1 %1.14E \n"
"h*additionNT.N %1.14E h*additionNT.Nn %1.14E h %1.14E h*addNT.NeTe %1.14E\n"
, iVertex,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], n_dest.n_n,
h_use*AdditionNT.N, h_use*AdditionNT.Nn, h_use,
h_use*AdditionNT.NeTe);
}
f64 factor = 1.0, factor_neut = 1.0; // no compression in this kernel; initialized so the debug printf below is well-defined
{
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te;
//
if (TEST_T) {
printf("\nCdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn %1.12E Tn_k %1.12E \n"
,
iVertex, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi,T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei_effective; // optimize after
f64 nu_eiBar;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
s_en_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n);
s_in_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n); // returns factor 1.0 if n+nn > 1.0e14.
// Send heat into neutrals if there's not much stuff here total.
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY,n_src_or_use[threadIdx.x].n)*lnLambda / (T_use.Te*sqrt_Te);
f64 nu_eHeart = 1.87*nu_eiBar + n_src_or_use[threadIdx.x].n_n*s_en_visc*electron_thermal;
f64_vec3 omega = p_B_major[iVertex] * qovermc;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))));
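// (Limits of this correction: as omega -> 0 it reduces to
// nu_eiBar*(1 - 0.9*nu_eiBar/nu_eHeart), which is ~0.52*nu_eiBar when neutral
// drag is negligible (nu_eHeart ~ 1.87*nu_eiBar); when |omega| is large compared
// with both nu_eHeart and omega.z the bracket tends to 1 and nu_ei_effective -> nu_eiBar.)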
if (TEST) printf("%d nu_eiBar: %1.10E n %1.10E lnLambda %1.10E T_use %1.10E \n"
"nu_eHeart %1.10E omega %1.8E %1.8E %1.8E qovermc %1.8E nu_eiBar/nu_eHeart %1.8E \n"
"nunuomegaomegafac %1.9E ratio %1.9E 1.0-0.9* = %1.9E nu_ei_effective %1.9E\n",
iVertex, nu_eiBar, n_src_or_use[threadIdx.x].n, lnLambda, T_use.Te,
nu_eHeart, omega.x, omega.y, omega.z, qovermc,
nu_eiBar / nu_eHeart,
(nu_eHeart*nu_eHeart + omega.z*omega.z) / (nu_eHeart*nu_eHeart + omega.dot(omega)),
nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))),
(1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega)))),
nu_ei_effective
);
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x]*n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
// I see that I did resistive heating for nu_ei but did something much more complicated in the acceleration routine.
// That isn't quite right then.
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n_n * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
if (TEST) {
printf(
"%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E Frictional htg (NT+=):e %1.10E\n"
"elec e-n z htg: %1.10E i-e z htg: %1.10E \n",
iVertex, v_n.z, vie.viz, vie.vez,
h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz)),
h_use*AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_en_MT*m_en*(v_n.z - vie.vez)*(v_n.z - vie.vez),
h_use*AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n*TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz)
);
};
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
// 6th Nov 2019 : add 2* so that it all goes here.
LHS.xx = 1.0 - 2.0*h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -2.0*h_use * (M_in * nu_in_MT);
LHS.xz = -2.0*h_use *(M_en * nu_en_MT);
LHS.yx = -2.0*h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - 2.0*h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -2.0*h_use * M_ei * nu_eiBar;
LHS.zx = -2.0*h_use * M_en * nu_ne_MT;
LHS.zy = -2.0*h_use * M_ei * nu_ie;
LHS.zz = 1.0 - 2.0*h_use * (-M_en * nu_en_MT - M_ei * nu_eiBar);
if (TEST) {
printf("%d LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n"
,
iVertex, LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
}
LHS.Inverse(inverted);
}
f64_vec3 RHS;
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
//RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
// + h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
//RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
// + h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
//RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
// + h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
RHS.x = newdata.NnTn;
RHS.y = newdata.NiTi;
RHS.z = newdata.NeTe;
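// (With RHS = NT_k and LHS = I - 2*h*A, where A is the same exchange matrix as in
// kernelAdvanceDensityAndTemperature but with nu_eiBar for the e-i coupling, this
// is a single backward-Euler step with the exchange rate doubled, per the
// 6th Nov 2019 note above; the commented-out RHS lines show the earlier
// (I + h*A)*NT_k form.)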
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
T3 T_dest;
T_dest.Tn = newdata.NnTn / newdata.Nn;
T_dest.Ti = newdata.NiTi / newdata.N;
T_dest.Te = newdata.NeTe/ newdata.N;
if (TEST) {
printf("\ninverted %d | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n"
" NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n\n"
,
iVertex, inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z,
newdata.NnTn, newdata.NiTi, newdata.NeTe,
T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
// if (T_dest.Te != T_dest.Te) {
// printf("Advance_n_T %d : Te NaN factor %1.8E newdata.N %1.10E flag %d \n"
// "n %1.10E Area %1.10E hd/dtNT %1.10E\n",
// iVertex, factor, newdata.N, info.flag,
// n_src_or_use[threadIdx.x].n,AreaMajor[threadIdx.x] , h_use * NTadditionrates[iVertex].N);
// }
p_T_major_dest[iVertex] = T_dest;
} else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
__global__ void kernelAdvanceDensityAndTemperature_noadvectioncompression_Copy(
f64 h_use,
structural * __restrict__ p_info_major,
nvals * p_n_major,
T3 * p_T_major,
NTrates * __restrict__ NTadditionrates,
nvals * p_n_use,
T3 * p_T_use,
v4 * __restrict__ p_vie_use,
f64_vec3 * __restrict__ p_v_n_use,
f64 * __restrict__ p_AreaMajor,
nvals * __restrict__ p_n_major_dest,
T3 * __restrict__ p_T_major_dest,
f64_vec3 * __restrict__ p_B_major,
f64 * __restrict__ p_Tgraph_resistive,
f64 * __restrict__ p_Tgraph_other,
f64 * __restrict__ p_Tgraph_total,
f64 * __restrict__ p_Tgraph_dNT
)
{
// runs for major tile
// nu would have been a better choice to go in shared as it coexists with the 18 doubles in "LHS","inverted".
// Important to set 48K L1 for this routine.
__shared__ nvals n_src_or_use[threadsPerTileMajor];
__shared__ f64 AreaMajor[threadsPerTileMajor];
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // iVertex OF VERTEX
structural info = p_info_major[iVertex];
// if (iVertex == CHOSEN) printf("GPU iVertex %d info.flag %d \n", CHOSEN, info.flag);
if ((info.flag == DOMAIN_VERTEX)) {
n_src_or_use[threadIdx.x] = p_n_major[iVertex]; // used throughout so a good candidate to stick in shared mem
AreaMajor[threadIdx.x] = p_AreaMajor[iVertex]; // ditto
NTrates newdata;
{
NTrates AdditionNT = NTadditionrates[iVertex];
newdata.N = n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] + h_use * AdditionNT.N;
newdata.Nn = n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] + h_use * AdditionNT.Nn;
newdata.NnTn = h_use * AdditionNT.NnTn; // start off without knowing 'factor' so we can ditch AdditionNT
newdata.NiTi = h_use * AdditionNT.NiTi;
newdata.NeTe = h_use * AdditionNT.NeTe;
nvals n_dest;
n_dest.n = newdata.N / (AreaMajor[threadIdx.x]);
n_dest.n_n = newdata.Nn / (AreaMajor[threadIdx.x]);
p_n_major_dest[iVertex] = n_dest;
if (TEST)
printf("Bdvance_nT %d : nsrc %1.13E nn %1.13E *AreaMajor %1.13E %1.13E\n"
"newdata.Nn %1.12E newdata.Ni %1.12E AreaMajor %1.14E n_n_k+1 %1.14E \n"
"h*additionNT.N %1.14E h*additionNT.Nn %1.14E h %1.14E \n"
, VERTCHOSEN,
n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n,
n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x],
n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x],
newdata.Nn, newdata.N, AreaMajor[threadIdx.x], n_dest.n_n,
h_use*AdditionNT.N, h_use*AdditionNT.Nn, h_use);
}
f64 factor = 1.0, factor_neut = 1.0; // initialized; no compression factor is computed before the debug printf below
T3 T_src = p_T_major[iVertex];
newdata.NnTn += n_src_or_use[threadIdx.x].n_n*AreaMajor[threadIdx.x] * T_src.Tn;
newdata.NiTi += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Ti;
newdata.NeTe += n_src_or_use[threadIdx.x].n*AreaMajor[threadIdx.x] * T_src.Te;
//
if (TEST) {
printf("\nAdvance_nT %d : n %1.12E Area %1.12E compressfac %1.10E \n"
"newdata.NiTi %1.12E Ti_k %1.12E newdata.NeTe %1.10E Te_k %1.10E\n"
"newdata.NnTn %1.12E Tn_k %1.12E \n"
,
VERTCHOSEN, n_src_or_use[threadIdx.x].n, AreaMajor[threadIdx.x], factor,
newdata.NiTi, T_src.Ti, newdata.NeTe, T_src.Te,
newdata.NnTn, T_src.Tn);
}
f64 nu_ne_MT, nu_en_MT, nu_ni_MT, nu_in_MT, nu_ei_effective; // optimize after
f64 nu_eiBar;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal, lnLambda, s_in_MT, s_en_MT, s_en_visc;
n_src_or_use[threadIdx.x] = p_n_use[iVertex];
T3 T_use = p_T_use[iVertex];
sqrt_Te = sqrt(T_use.Te); // should be "usedata"
ionneut_thermal = sqrt(T_use.Ti / m_ion + T_use.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_src_or_use[threadIdx.x].n, T_use.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T_use.Ti*one_over_kB,
&s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T_use.Te*one_over_kB, // call with T in electronVolts
&s_en_MT,
&s_en_visc);
//s_en_MT = Estimate_Ion_Neutral_MT_Cross_section(T_use.Te*one_over_kB);
//s_en_visc = Estimate_Ion_Neutral_Viscosity_Cross_section(T_use.Te*one_over_kB);
if (n_src_or_use[threadIdx.x].n_n > ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n) {
s_en_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
s_in_MT *= n_src_or_use[threadIdx.x].n_n / (ARTIFICIAL_RELATIVE_THRESH_HEAT *n_src_or_use[threadIdx.x].n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
s_en_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n);
s_in_MT *= ArtificialUpliftFactor(n_src_or_use[threadIdx.x].n, n_src_or_use[threadIdx.x].n_n); // returns factor 1.0 if n+nn > 1.0e14.
// Send heat into neutrals if there's not much stuff here total.
// Need nu_ne etc to be defined:
nu_ne_MT = s_en_MT * n_src_or_use[threadIdx.x].n * electron_thermal; // have to multiply by n_e for nu_ne_MT
nu_ni_MT = s_in_MT * n_src_or_use[threadIdx.x].n * ionneut_thermal;
nu_en_MT = s_en_MT * n_src_or_use[threadIdx.x].n_n*electron_thermal;
nu_in_MT = s_in_MT * n_src_or_use[threadIdx.x].n_n*ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY, n_src_or_use[threadIdx.x].n)*lnLambda / (T_use.Te*sqrt_Te);
if (TEST) {
printf("nu_eiBar %1.12E n %1.12E lnLambda %1.10E \n\n", nu_eiBar, n_src_or_use[threadIdx.x].n, lnLambda);
real Te_eV = T_use.Te*one_over_kB;
real Te_eV2 = Te_eV*Te_eV;
real Te_eV3 = Te_eV*Te_eV2;
if (n_src_or_use[threadIdx.x].n*Te_eV3 > 0.0) {
f64 lnLambda1 = 23.0 - 0.5*log(n_src_or_use[threadIdx.x].n / Te_eV3);
f64 lnLambda2 = 24.0 - 0.5*log(n_src_or_use[threadIdx.x].n / Te_eV2);
// smooth between the two:
f64 factorxx = 2.0*fabs(Te_eV - 10.0)*(Te_eV - 10.0) / (1.0 + 4.0*(Te_eV - 10.0)*(Te_eV - 10.0));
lnLambda = lnLambda1*(0.5 - factorxx) + lnLambda2*(0.5 + factorxx);
printf("lnLambda1 2 %1.14E %1.14E lnLambda %1.14E Te_eV %1.12E factorxx %1.12E \n", lnLambda1, lnLambda2, lnLambda, Te_eV, factorxx);
// floor at 2 just in case, but it should not get near:
f64 lnLambda_sq = lnLambda*lnLambda;
factorxx = 1.0 + 0.5*lnLambda + 0.25*lnLambda_sq + 0.125*lnLambda*lnLambda_sq + 0.0625*lnLambda_sq*lnLambda_sq;
lnLambda += 2.0 / factorxx;
printf("lnLambda %1.14E after floor at 2 ... \n", lnLambda);
if (lnLambda < 2.0) lnLambda = 2.0;
};
};
f64 nu_eHeart = 1.87*nu_eiBar + n_src_or_use[threadIdx.x].n_n*s_en_visc*electron_thermal;
f64_vec3 omega = p_B_major[iVertex] * qovermc;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + omega.z*omega.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega.dot(omega))));
// nu_ie = nu_ei;
// nu_eHeart = 1.87*nu_eiBar + data_k.n_n*s_en_visc*electron_thermal;
}
// For now doing velocity-independent resistive heating.
// Because although we have a magnetic correction Upsilon_zz involved, we ignored it
// since we are also squashing the effect of velocity-dependent collisions on vx and vy (which
// would produce a current in the plane) and this squashing should create heat, which
// maybe means it adds up to the velocity-independent amount of heating.
{
f64_vec3 v_n = p_v_n_use[iVertex];
v4 vie = p_vie_use[iVertex];
newdata.NeTe += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n* TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz));
p_Tgraph_resistive[iVertex] = TWOTHIRDS*nu_en_MT*m_en*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.vez)*(v_n.z - vie.vez))
+ TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz);
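// (Assumed reading of the TWOTHIRDS factor: thermal energy content is (3/2) NT, so the
// frictional energy input per colliding pair appears as
//   d(NT)/dt = (2/3) * N * m_reduced * nu * |delta_v|^2,
// with m_en, m_ei and M_in*m_n / M_in*m_i playing the role of the reduced-mass factor.)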
// I see that I did resistive heating for nu_ei but did something much more complicated in the acceleration routine.
// That isn't quite right then.
newdata.NiTi += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n * TWOTHIRDS*nu_in_MT*M_in*m_n*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
newdata.NnTn += h_use*(AreaMajor[threadIdx.x] * n_src_or_use[threadIdx.x].n_n * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
(v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
+ (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
+ (v_n.z - vie.viz)*(v_n.z - vie.viz)));
//if (TEST)
// printf("%d v_n.z %1.9E vie_use.viz %1.9E vie_use.vez %1.9E \n areamajor %1.8E\n"
// "nu_in %1.10E nu_en %1.8E \n"
// "Frictional htg (NT+=): n i e %1.10E %1.10E %1.10E\n",
// VERTCHOSEN, v_n.z, vie.viz, vie.vez, AreaMajor[threadIdx.x],
// nu_in_MT, nu_en_MT,
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ni_MT*M_in*m_i*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.viz)*(v_n.z - vie.viz))),
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_in_MT*M_in*m_n*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.viz)*(v_n.z - vie.viz))),
// h_use*(AreaMajor[threadIdx.x] * TWOTHIRDS*nu_en_MT*m_en*(
// (v_n.x - vie.vxy.x)*(v_n.x - vie.vxy.x)
// + (v_n.y - vie.vxy.y)*(v_n.y - vie.vxy.y)
// + (v_n.z - vie.vez)*(v_n.z - vie.vez))
// + AreaMajor[threadIdx.x] * TWOTHIRDS*nu_ei_effective*m_ei*(vie.vez - vie.viz)*(vie.vez - vie.viz))
// );
}
f64_tens3 inverted;
{
f64_tens3 LHS;
// x = neutral
// y = ion
// z = elec
// This is for NT
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
// 6th Nov 2019 : add 2* so that it all goes here.
LHS.xx = 1.0 - 2.0*h_use * (-M_en * nu_ne_MT - M_in * nu_ni_MT);
LHS.xy = -2.0*h_use * (M_in * nu_in_MT);
LHS.xz = -2.0*h_use *(M_en * nu_en_MT);
LHS.yx = -2.0*h_use * M_in * nu_ni_MT;
LHS.yy = 1.0 - 2.0*h_use * (-M_in * nu_in_MT - M_ei * nu_ie);
LHS.yz = -2.0*h_use * M_ei * nu_eiBar;
LHS.zx = -2.0*h_use * M_en * nu_ne_MT;
LHS.zy = -2.0*h_use * M_ei * nu_ie;
LHS.zz = 1.0 - 2.0*h_use * (-M_en * nu_en_MT - M_ei * nu_eiBar);
// some indices appear reversed because NT not T.
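// Written out, each row is backward Euler on the usual exchange law, e.g. for neutrals
//   dT_n/dt = 2 M_in nu_ni (T_i - T_n) + 2 M_en nu_ne (T_e - T_n)
// (with the M's presumably the usual mass-ratio factors), re-expressed in NT using the
// identity N_n*nu_ni = N_i*nu_in (both equal Area*n*n_n*sigma*v_thermal, from the nu
// definitions above) -- hence the "reversed" indices. In matrix form:
//   (I - 2 h A) (NT)^{new} = (NT)^{old},  solved by inverting the LHS below.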
if (TEST) printf("LHS.zz %1.10E h_use %1.10E M_en %1.10E nu_en_MT %1.10E nu_eiBar %1.10E\n",
LHS.zz, h_use, M_en, nu_en_MT, nu_eiBar);
if (TEST) {
printf("LHS | \n %1.14E %1.14E %1.14E |\n %1.14E %1.14E %1.14E | \n %1.14E %1.14E %1.14E | \n",
LHS.xx, LHS.xy, LHS.xz,
LHS.yx, LHS.yy, LHS.yz,
LHS.zx, LHS.zy, LHS.zz);
printf("GPU %d : NnTn %1.14E NeTe %1.14E \n", VERTCHOSEN, newdata.NnTn, newdata.NeTe);
printf("GPU nu_en_MT %1.14E\n", nu_en_MT);
}
LHS.Inverse(inverted);
}
f64_vec3 RHS;
f64 nu_ie = nu_eiBar;
// gonna have to change to Backward Euler :/
//RHS.x = newdata.NnTn - h_use * (nu_ni_MT*M_in + nu_ne_MT * M_en)*newdata.NnTn
// + h_use * nu_in_MT*M_in*newdata.NiTi + h_use * nu_en_MT*M_en*newdata.NeTe;
//RHS.y = newdata.NiTi - h_use * (nu_in_MT*M_in + nu_ie * M_ei)*newdata.NiTi
// + h_use * nu_ni_MT*M_in*newdata.NnTn + h_use * nu_ei*M_ei*newdata.NeTe;
//RHS.z = newdata.NeTe - h_use * (nu_en_MT*M_en + nu_ei * M_ei)*newdata.NeTe
// + h_use * nu_ie*M_ei*newdata.NiTi + h_use * nu_ne_MT*M_en*newdata.NnTn;
RHS.x = newdata.NnTn;
RHS.y = newdata.NiTi;
RHS.z = newdata.NeTe;
f64_vec3 NT;
NT = inverted * RHS;
newdata.NnTn = NT.x;
newdata.NiTi = NT.y;
newdata.NeTe = NT.z;
T3 T_dest;
T_dest.Tn = newdata.NnTn / newdata.Nn;
T_dest.Ti = newdata.NiTi / newdata.N;
T_dest.Te = newdata.NeTe / newdata.N;
if (TEST) {
printf("\ninverted | RHS \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n %1.14E %1.14E %1.14E | %1.14E \n",
inverted.xx, inverted.xy, inverted.xz, RHS.x,
inverted.yx, inverted.yy, inverted.yz, RHS.y,
inverted.zx, inverted.zy, inverted.zz, RHS.z);
printf("GPU %d : NnTn %1.14E NiTi %1.14E NeTe %1.14E \n"
"Tn Ti Te %1.14E %1.14E %1.14E\n", VERTCHOSEN, newdata.NnTn, newdata.NiTi, newdata.NeTe,
T_dest.Tn, T_dest.Ti, T_dest.Te);
} // This came out with a value.
if (TEST) printf("%d : T_dest %1.8E %1.8E %1.8E \n"
"newdata .NeTe %1.10E .N %1.10E factor %1.10E\n\n",
iVertex, T_dest.Tn, T_dest.Ti, T_dest.Te,
newdata.NeTe, newdata.N, factor
);
p_T_major_dest[iVertex] = T_dest;
p_Tgraph_other[iVertex] = 2.0 * M_en * nu_en_MT*(T_dest.Tn - T_dest.Te)
+ 2.0 * M_ei * nu_eiBar*(T_dest.Ti - T_dest.Te);
p_Tgraph_total[iVertex] = (T_dest.Te - T_src.Te) / h_use;
p_Tgraph_dNT[iVertex] = (T_dest.Te - T_src.Te)* newdata.N / (AreaMajor[threadIdx.x] * h_use);
} else {
// nothing to do ??
if (info.flag == OUTERMOST) {
p_n_major_dest[iVertex] = p_n_major[iVertex];
p_T_major_dest[iVertex] = p_T_major[iVertex];
}
else {
memset(p_n_major_dest + iVertex, 0, sizeof(nvals));
memset(p_T_major_dest + iVertex, 0, sizeof(T3));
};
};
}
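// ------------------------------------------------------------------------------------------
// Illustrative only (not called anywhere): the NT update above reduces to a 3x3 backward-Euler
// solve (I - 2 h A) x = b, done in the kernel via f64_tens3::Inverse. The standalone sketch
// below restates that solve with plain doubles and Cramer's rule; all names here are
// hypothetical helpers, not project types.
__host__ __device__ inline void SketchSolveBackwardEulerExchange3(
	double hstep,              // timestep, plays the role of h_use
	const double coeffA[3][3], // exchange matrix A holding the +/- M*nu entries
	const double rhs[3],       // (NnTn, NiTi, NeTe) before exchange
	double result[3])          // (NnTn, NiTi, NeTe) after backward-Euler exchange
{
	double lhs[3][3];
	for (int row = 0; row < 3; row++)
		for (int col = 0; col < 3; col++)
			lhs[row][col] = ((row == col) ? 1.0 : 0.0) - 2.0*hstep*coeffA[row][col];
	// 3x3 determinant by cofactor expansion along the first row:
	double det = lhs[0][0] * (lhs[1][1] * lhs[2][2] - lhs[1][2] * lhs[2][1])
			   - lhs[0][1] * (lhs[1][0] * lhs[2][2] - lhs[1][2] * lhs[2][0])
			   + lhs[0][2] * (lhs[1][0] * lhs[2][1] - lhs[1][1] * lhs[2][0]);
	// Cramer's rule: substitute rhs into each column of the LHS in turn.
	for (int col = 0; col < 3; col++) {
		double work[3][3];
		for (int row = 0; row < 3; row++)
			for (int jcol = 0; jcol < 3; jcol++)
				work[row][jcol] = (jcol == col) ? rhs[row] : lhs[row][jcol];
		double detcol = work[0][0] * (work[1][1] * work[2][2] - work[1][2] * work[2][1])
					  - work[0][1] * (work[1][0] * work[2][2] - work[1][2] * work[2][0])
					  + work[0][2] * (work[1][0] * work[2][1] - work[1][1] * work[2][0]);
		result[col] = detcol / det;
	}
}
// ------------------------------------------------------------------------------------------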
/*
__global__ void kernelCalculateUpwindDensity_tris(
structural * __restrict__ p_info_minor,
ShardModel * __restrict__ p_n_shard_n_major,
ShardModel * __restrict__ p_n_shard_major,
v4 * __restrict__ p_vie_minor,
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_overall_v_minor,
LONG3 * __restrict__ p_tricornerindex,
LONG3 * __restrict__ p_trineighindex,
LONG3 * __restrict__ p_which_iTri_number_am_I,
CHAR4 * __restrict__ p_szPBCneigh_tris,
nvals * __restrict__ p_n_upwind_minor, // result
T3 * __restrict__ p_T_minor,
T3 * __restrict__ p_T_upwind_minor // result
)
{
// The idea is to take the upwind n on each side of each
// major edge through this tri, weighted by |v.edge_normal|
// to produce an average.
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 doubles/vertex
__shared__ f64_12 shared_shards[threadsPerTileMajor]; // + 12
// 15 doubles right there. Max 21 for 288 vertices. 16 is okay.
// Might as well stick 1 more double in there if we get worried about registers.
// #############################################%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%###############
// We need a reverse index: this triangle carry 3 indices to know who it is to its corners.
long const iTri = blockDim.x*blockIdx.x + threadIdx.x;
structural const info = p_info_minor[iTri];
nvals result;
T3 upwindT;
shared_pos[threadIdx.x] = info.pos;
long const StartMajor = blockIdx.x*threadsPerTileMajor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long const StartMinor = blockIdx.x*threadsPerTileMinor;
long const EndMinor = StartMinor + threadsPerTileMinor;
if (threadIdx.x < threadsPerTileMajor)
{
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[threadsPerTileMajor*blockIdx.x + threadIdx.x].n), MAXNEIGH * sizeof(f64));
// efficiency vs memcpy? We only need 12 here, not the centre.
}
__syncthreads();
f64 n0, n1, n2;
T3 T0, T1, T2;
f64_vec2 edge_normal0, edge_normal1, edge_normal2;
LONG3 tricornerindex, trineighindex;
LONG3 who_am_I;
f64_vec2 v_overall;
char szPBC_triminor[6];
CHAR4 szPBC_neighs;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)
|| (info.flag == CROSSING_CATH))
{
// Several things we need to collect:
// . v in this triangle and mesh v at this triangle centre.
// . edge_normal going each way
// . n that applies from each corner
// How to get n that applies from each corner:
tricornerindex = p_tricornerindex[iTri];
who_am_I = p_which_iTri_number_am_I[iTri];
szPBC_neighs = p_szPBCneigh_tris[iTri];
// Wasteful:
T0 = p_T_minor[tricornerindex.i1 + BEGINNING_OF_CENTRAL];
T1 = p_T_minor[tricornerindex.i2 + BEGINNING_OF_CENTRAL];
T2 = p_T_minor[tricornerindex.i3 + BEGINNING_OF_CENTRAL];
if ((tricornerindex.i1 >= StartMajor) && (tricornerindex.i1 < EndMajor))
{
n0 = shared_shards[tricornerindex.i1 - StartMajor].n[who_am_I.i1]; // whoa, be careful with data type / array
}
else {
n0 = p_n_shard_major[tricornerindex.i1].n[who_am_I.i1];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i2 >= StartMajor) && (tricornerindex.i2 < EndMajor))
{
n1 = shared_shards[tricornerindex.i2 - StartMajor].n[who_am_I.i2];
}
else {
n1 = p_n_shard_major[tricornerindex.i2].n[who_am_I.i2];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i3 >= StartMajor) && (tricornerindex.i3 < EndMajor))
{
n2 = shared_shards[tricornerindex.i3 - StartMajor].n[who_am_I.i3];
}
else {
n2 = p_n_shard_major[tricornerindex.i3].n[who_am_I.i3];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
v_overall = p_overall_v_minor[iTri];
f64_vec2 relv = p_vie_minor[iTri].vxy - v_overall;
// So this relies on the assumption that n = 0 outside of domain.
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
int number_within = ((n0 > 0.0) ? 1 : 0) + ((n1 > 0.0) ? 1 : 0) + ((n2 > 0.0) ? 1 : 0); // parenthesize: ?: binds looser than +, as in the neutral pass below
if (number_within == 1) {
result.n = n0 + n1 + n2;
upwindT.Te = T0.Te + T1.Te + T2.Te;
upwindT.Tn = T0.Tn + T1.Tn + T2.Tn;
upwindT.Ti = T0.Ti + T1.Ti + T2.Ti;
}
else {
// quick way not upwind:
result.n = 0.5*(n0 + n1 + n2);
upwindT.Te = 0.5*(T0.Te + T1.Te + T2.Te);
upwindT.Tn = 0.5*(T0.Tn + T1.Tn + T2.Tn);
upwindT.Ti = 0.5*(T0.Ti + T1.Ti + T2.Ti); // watch out for heat evacuating CROSSING_INS tris.
}
//if (iTri == 23400) printf("\n23400 was an insulator tri, T012 %1.8E %1.8E %1.8E upwind %1.8E\n"
// "indexcorner %d %d %d\n\n",
// T0.Te,T1.Te,T2.Te,upwindT.Te,
// tricornerindex.i1, tricornerindex.i1, tricornerindex.i3);
if (info.flag == CROSSING_CATH) {
// set n = 0 if position is within cathode rod:
if (!TestDomainPos(info.pos)) {
result.n = 0.0;
}
}
} else {
trineighindex = p_trineighindex[iTri];
// if (iTri == CHOSEN) printf("%d GPU: n0 %1.14E n1 %1.14E n2 %1.14E \n"
// "relv GPU %1.14E %1.14E \n",
// CHOSEN, n0, n1, n2, relv.x, relv.y);
f64_vec2 nearby_pos;
if ((trineighindex.i1 >= StartMinor) && (trineighindex.i1 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i1 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i1].pos;
}
if (szPBC_neighs.per0 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per0 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
// Slightly puzzled why we don't just take difference of 2 corners of our triangle.
// Why dealing with tri positions instead of vertex positions? Because tri positions
// are the corners of the major cell.
edge_normal0.x = nearby_pos.y - info.pos.y;
edge_normal0.y = info.pos.x - nearby_pos.x;
// CAREFUL : which side is which???
// tri centre 2 is on same side of origin as corner 1 -- I think
// We don't know if the corners have been numbered anticlockwise?
// Could arrange it though.
// So 1 is anticlockwise for edge 0.
f64 numerator = 0.0;
f64 dot1, dot2;
f64 dot0 = relv.dot(edge_normal0);
if ((trineighindex.i2 >= StartMinor) && (trineighindex.i2 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i2 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i2].pos;
}
if (szPBC_neighs.per1 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per1 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
edge_normal1.x = nearby_pos.y - info.pos.y;
edge_normal1.y = info.pos.x - nearby_pos.x;
dot1 = relv.dot(edge_normal1);
if ((trineighindex.i3 >= StartMinor) && (trineighindex.i3 < EndMinor)) {
nearby_pos = shared_pos[trineighindex.i3 - StartMinor];
}
else {
nearby_pos = p_info_minor[trineighindex.i3].pos;
}
if (szPBC_neighs.per2 == ROTATE_ME_CLOCKWISE) {
nearby_pos = Clockwise_d*nearby_pos;
}
if (szPBC_neighs.per2 == ROTATE_ME_ANTICLOCKWISE) {
nearby_pos = Anticlockwise_d*nearby_pos;
}
edge_normal2.x = nearby_pos.y - info.pos.y;
edge_normal2.y = info.pos.x - nearby_pos.x;
dot2 = relv.dot(edge_normal2);
bool b0 = 0, b1 = 0, b2 = 0; // is this n012 legit? (initialize to 0: only some are set below)
if (dot0 > 0.0) { b2 = 1; }
else { b1 = 1; };
if (dot1 > 0.0) { b0 = 1; }
else { b2 = 1; };
if (dot2 > 0.0) { b1 = 1; }
else { b0 = 1; };
//Usually now only one of b012 is false.
if (b0 == 0) {
if (b1 == 0) {
result.n = n2; // how idk
memcpy(&upwindT, &T2, sizeof(T3));
} else {
if (b2 == 0) {
result.n = n1;
memcpy(&upwindT, &T1, sizeof(T3));
} else {
result.n = min(n1, n2);
upwindT.Te = min(T1.Te, T2.Te);
upwindT.Ti = min(T1.Ti, T2.Ti);
}
}
} else {
if ((b1 == 0) && (b2 == 0)) {
result.n = n0;
memcpy(&upwindT, &T0, sizeof(T3));
} else {
if (b1 == 0) {
result.n = min(n0, n2);
memcpy(&upwindT, &T2, sizeof(T3));
} else {
if (b2 == 0)
{
result.n = min(n0, n1);
upwindT.Te = min(T1.Te, T0.Te);
upwindT.Ti = min(T1.Ti, T0.Ti);
} else {
result.n = min(min(n0, n1), n2);
upwindT.Te = min(T0.Te, min(T1.Te, T2.Te));
upwindT.Ti = min(T0.Ti, min(T1.Ti, T2.Ti));
}
}
}
}
// if (iTri == 23435) printf("CALC UPWIND n\n"
// "tricornerindex %d %d %d\n"
// "n0 n1 n2 %1.12E %1.12E %1.12E\n"
// "relv %1.9E %1.9E \n"
// "edge_nml %1.9E %1.9E | %1.9E %1.9E | %1.9E %1.9E \n"
// "dot %1.9E %1.9E %1.9E\n"
// "b0 b1 b2 %d %d %d \n"
// "result.n %1.9E\n\n",
// tricornerindex.i1, tricornerindex.i2, tricornerindex.i3,
// n0, n1, n2,
// relv.x, relv.y,
// edge_normal0.x, edge_normal0.y, edge_normal1.x, edge_normal1.y, edge_normal2.x, edge_normal2.y,
// dot0, dot1, dot2,
// (b0 ? 1 : 0), (b1 ? 1 : 0), (b2 ? 1 : 0),
// result.n);
//
//
// if (iTri == 23400) printf("\n23400 was a domain tri, T012 %1.8E %1.8E %1.8E upwind %1.8E\n"
// "relv %1.8E %1.8E b012 %d %d %d \n\n",
// T0.Te, T1.Te, T2.Te, upwindT.Te,
// relv.x, relv.y, (int)b0, (int)b1, (int)b2);
// Alternative way: try using squared weights of upwind n for v.dot(edgenormal).
// This is old and doesn't work when the JxB force empties things out near the insulator:
// Argument against fabs in favour of squared weights?
};
// Think carefully / debug how it goes for CROSSING_INS.
} else {
result.n = 0.0;
memset(&upwindT, 0, sizeof(T3));
};
// Now same for upwind neutral density:
// In order to use syncthreads we had to come out of the branching.
if (threadIdx.x < threadsPerTileMajor)
{
memcpy(&(shared_shards[threadIdx.x].n),
&(p_n_shard_n_major[threadsPerTileMajor*blockIdx.x + threadIdx.x].n),
sizeof(f64)*MAXNEIGH);
// efficiency vs memcpy? We only need 12 here, not the centre.
}
__syncthreads();
// &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)
|| (info.flag == CROSSING_CATH))
{
if ((tricornerindex.i1 >= StartMajor) && (tricornerindex.i1 < EndMajor))
{
n0 = shared_shards[tricornerindex.i1 - StartMajor].n[who_am_I.i1];
}
else {
n0 = p_n_shard_n_major[tricornerindex.i1].n[who_am_I.i1];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i2 >= StartMajor) && (tricornerindex.i2 < EndMajor))
{
n1 = shared_shards[tricornerindex.i2 - StartMajor].n[who_am_I.i2];
} else {
n1 = p_n_shard_n_major[tricornerindex.i2].n[who_am_I.i2];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
if ((tricornerindex.i3 >= StartMajor) && (tricornerindex.i3 < EndMajor))
{
n2 = shared_shards[tricornerindex.i3 - StartMajor].n[who_am_I.i3];
} else {
n2 = p_n_shard_n_major[tricornerindex.i3].n[who_am_I.i3];
// at least it's 1 bus journey this way instead of 2 to fetch n_shards.
}
f64_vec2 relv = p_v_n_minor[iTri].xypart() - v_overall;
if ((info.flag == CROSSING_INS) || (info.flag == CROSSING_CATH)) {
int number_within = ((n0 > 0.0) ? 1 : 0) + ((n1 > 0.0) ? 1 : 0) + ((n2 > 0.0) ? 1 : 0);
if (number_within == 1) {
result.n_n = n0 + n1 + n2;
upwindT.Tn = T0.Tn + T1.Tn + T2.Tn;
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : INS-1 nn012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1, n2, result.n_n);
} else {
// quick way not upwind:
result.n_n = 0.5*(n0 + n1 + n2);
upwindT.Tn = 0.5*(T0.Tn + T1.Tn + T2.Tn);
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : INS-2 nn012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1, n2, result.n_n);
};
if (info.flag == CROSSING_CATH) {
// set n = 0 if position is within cathode rod:
if (!TestDomainPos(info.pos)) {
result.n_n = 0.0;
}
}
} else {
f64 numerator = 0.0;
f64 dot1, dot2;
f64 dot0 = relv.dot(edge_normal0);
dot1 = relv.dot(edge_normal1);
dot2 = relv.dot(edge_normal2);
bool b0 = 0, b1 = 0, b2 = 0; // is this n012 legit? (initialize to 0, as above)
if (dot0 > 0.0) { b2 = 1; }
else { b1 = 1; };
if (dot1 > 0.0) { b0 = 1; }
else { b2 = 1; };
if (dot2 > 0.0) { b1 = 1; }
else { b0 = 1; };
//Usually now only one of b012 is false.
if (b0 == 0) {
if (b1 == 0) {
result.n_n = n2; // how idk
upwindT.Tn = T2.Tn;
}
else {
if (b2 == 0) { result.n_n = n1; upwindT.Tn = T1.Tn; } // n_n, not n, in the neutral pass; set Tn to match the other branches
else {
result.n_n = min(n1, n2);
upwindT.Tn = min(T1.Tn, T2.Tn);
}
}
}
else {
if ((b1 == 0) && (b2 == 0)) {
result.n_n = n0;
upwindT.Tn = T0.Tn;
}
else {
if (b1 == 0) {
result.n_n = min(n0, n2);
upwindT.Tn = min(T2.Tn, T0.Tn);
}
else {
if (b2 == 0)
{
result.n_n = min(n0, n1);
upwindT.Tn = min(T1.Tn, T0.Tn);
} else {
result.n_n = min(min(n0, n1), n2);
upwindT.Tn = min(min(T1.Tn, T0.Tn), T2.Tn);
}
}
}
}
// if ((iTri == 51243) || (iTri == 43048)) printf("%d : DOMAIN n012 %1.8E %1.8E %1.8E nn %1.10E\n",
// iTri, n0, n1,n2, result.n_n);
// Look carefully at what happens for CROSSING_INS.
// relv should be horizontal, hence it doesn't give a really low density? CHECK IT IN PRACTICE.
};
} else {
result.n_n = 0.0;
upwindT.Tn = 0.0;
};
p_n_upwind_minor[iTri] = result;
p_T_upwind_minor[iTri] = upwindT;
}*/
/*
__global__ void kernelAccumulateAdvectiveMassHeatRate(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major,
nvals * __restrict__ p_n_upwind_minor,
v4 * __restrict__ p_vie_minor,
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
//T3 * __restrict__ p_T_minor, // may or may not overlap source: don't we only use from tris? so not overlap
T3 * __restrict__ p_T_upwind_minor,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v,
f64 * __restrict__ p_div_v_n,
f64 * __restrict__ p_Integrated_div_v_overall
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // only reused what, 3 times? 2*2 per major thread
__shared__ nvals shared_n_upwind[threadsPerTileMinor];
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // 2*2 per major thread
__shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ T3 shared_T[threadsPerTileMinor]; // 2*2 ... rightly or wrongly.
// Do neutral after? Necessitates doing all the random loads again.
// Is that worse than loading for each point at the time, a 2-vector v_overall?
// About 6 bus journeys per external point. About 1/4 as many external as internal?
// ^ only 6 because doing ion&neutral together. Changing to do sep could make sense.
// 2* (2+2+2+2+3) = 22
// Max viable threads at 26: 236
// Max viable threads at 24: 256
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
memcpy(&(shared_n_upwind[2 * threadIdx.x]),
p_n_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(nvals) * 2);
v4 vie[2];
memcpy(&vie, p_vie_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(v4) * 2);
shared_vxy[2 * threadIdx.x] = vie[0].vxy;
shared_vxy[2 * threadIdx.x + 1] = vie[1].vxy;
f64_vec3 v_n[2];
memcpy(v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
shared_v_n[2 * threadIdx.x] = v_n[0].xypart();
shared_v_n[2 * threadIdx.x + 1] = v_n[1].xypart();
memcpy(&(shared_T[2 * threadIdx.x]), p_T_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(T3) * 2);
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if (info.flag == DOMAIN_VERTEX) {
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 Te_next, Te_prev, Ti_next, Ti_prev, Tn_next, Tn_prev;
short inext, i = 0;
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
nvals nvls = shared_n_upwind[iTri - StartMinor];
n_prev = nvls.n;
nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
v_n_prev = shared_v_n[iTri - StartMinor];
Te_prev = shared_T[iTri - StartMinor].Te;
Ti_prev = shared_T[iTri - StartMinor].Ti;
Tn_prev = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
nvals n_upwind = p_n_upwind_minor[iTri];
n_prev = n_upwind.n;
nn_prev = n_upwind.n_n;
vxy_prev = p_vie_minor[iTri].vxy;
v_n_prev = p_v_n_minor[iTri].xypart();
T3 Tuse = p_T_upwind_minor[iTri];
Te_prev = Tuse.Te;
Ti_prev = Tuse.Ti;
Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
nvals totalmassflux_out;
memset(&totalmassflux_out, 0, sizeof(nvals));
T3 totalheatflux_out;
memset(&totalheatflux_out, 0, sizeof(T3));
f64 Integrated_div_v = 0.0;
f64 Integrated_div_v_n = 0.0;
f64 Integrated_div_v_overall = 0.0;
f64 AreaMajor = 0.0;
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0;
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
nvals nvls = shared_n_upwind[iTri - StartMinor];
n_next = nvls.n;
nn_next = nvls.n_n;
vxy_next = shared_vxy[iTri - StartMinor];
v_n_next = shared_v_n[iTri - StartMinor];
Te_next = shared_T[iTri - StartMinor].Te;
Ti_next = shared_T[iTri - StartMinor].Ti;
Tn_next = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
nvals n_upwind = p_n_upwind_minor[iTri];
n_next = n_upwind.n;
nn_next = n_upwind.n_n;
vxy_next = p_vie_minor[iTri].vxy;
v_n_next = p_v_n_minor[iTri].xypart();
T3 Tuse = p_T_upwind_minor[iTri];
Te_next = Tuse.Te;
Ti_next = Tuse.Ti;
Tn_next = Tuse.Tn;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
v_n_next = Clockwise_d*v_n_next;
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
v_n_next = Anticlockwise_d*v_n_next;
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v_overall += 0.5*(v_overall_prev + v_overall_next).dot(edge_normal); // Average outward velocity of edge...
// The area CAN be changing because of other vertices dragging on it.
// However we can ignore it as n,T should be locally constant near the rod
// anyway.
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
totalmassflux_out.n += 0.5*(n_prev*(vxy_prev - v_overall_prev)
+ n_next*(vxy_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Ti += 0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
+ n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Te += 0.5*(n_prev*Te_prev*(vxy_prev - v_overall_prev)
+ n_next*Te_next*(vxy_next - v_overall_next)).dot(edge_normal);
};
if ((nn_prev != 0.0) && (nn_next != 0.0)) {
Integrated_div_v_n += 0.5*(v_n_prev + v_n_next).dot(edge_normal);
totalmassflux_out.n_n += 0.5*(nn_prev*(v_n_prev - v_overall_prev)
+ nn_next*(v_n_next - v_overall_next)).dot(edge_normal);
totalheatflux_out.Tn += 0.5*(nn_prev*Tn_prev*(v_n_prev - v_overall_prev)
+ nn_next*Tn_next*(v_n_next - v_overall_next)).dot(edge_normal);
};
if (TEST1) printf("advect GPU %d : "
"i %d iTri %d heatfluxout_contrib e %1.14E \n"
"nprev %1.14E nnext %1.14E\n"
"Te_prev next %1.14E %1.14E \nrel vxy %1.14E %1.14E ; %1.14E %1.14E\n"
"edge_normal %1.14E %1.14E \n"
"-------------------------\n",
iVertex, i, iTri,
0.5*(n_prev*Te_prev*(vxy_prev - v_overall_prev)
+ n_next*Te_next*(vxy_next - v_overall_next)).dot(edge_normal),
n_prev, n_next,
Te_prev, Te_next, (vxy_prev - v_overall_prev).x, (vxy_prev - v_overall_prev).y,
(vxy_next - v_overall_next).x, (vxy_next - v_overall_next).y,
edge_normal.x, edge_normal.y);
if (TESTADVECT) printf("AccumulateAdvectiveMassHeatRate iVertex %d : inext %d iTri %d \n"
"NTiflux %1.9E cumu %1.9E n_prev %1.9E n_next %1.9E vxyprev %1.7E %1.7E\n"
"vxy_prev.edgenml %1.9E v_overall_prev. %1.9E vxy_next. %1.9E v_overall_next. %1.9E\n"
"Ti_prev %1.9E Ti_next %1.9E prev contrib %1.9E nex cntrib %1.9E\n"
"v_overall_next %1.9E %1.9E | \n"
"------------------------------------------------\n",
iVertex, i, iTri,
0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
+ n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal),
totalheatflux_out.Ti, n_prev, n_next, vxy_prev.x, vxy_prev.y,
vxy_prev.dot(edge_normal),
v_overall_prev.dot(edge_normal),
vxy_next.dot(edge_normal),
v_overall_next.dot(edge_normal),
Ti_prev, Ti_next,
0.5*n_prev*Ti_prev*(vxy_prev - v_overall_prev).dot(edge_normal),
0.5*n_next*Ti_next*(vxy_next - v_overall_next).dot(edge_normal),
v_overall_next.x, v_overall_next.y
);
if (TESTADVECTNEUT) printf("AccumulateAdvectiveMassHeatRate iVertex %d : inext %d iTri %d \n"
"NnTnflux %1.9E cumu %1.9E nn_prev %1.9E nn_next %1.9E vxyprev %1.7E %1.7E\n"
"vxy_prev.edgenml %1.9E v_overall_prev. %1.9E vxy_next. %1.9E v_overall_next. %1.9E\n"
"Tn_prev %1.9E Tn_next %1.9E prev contrib %1.9E nex cntrib %1.9E\n"
"v_overall_next %1.9E %1.9E\n"
"------------------------------------------------\n",
iVertex, i, iTri,
0.5*(nn_prev*Tn_prev*(v_n_prev - v_overall_prev)
+ nn_next*Tn_next*(v_n_next - v_overall_next)).dot(edge_normal),
totalheatflux_out.Tn, nn_prev, nn_next, v_n_prev.x, v_n_prev.y,
v_n_prev.dot(edge_normal),
v_overall_prev.dot(edge_normal),
v_n_next.dot(edge_normal),
v_overall_next.dot(edge_normal),
Tn_prev, Tn_next,
0.5*nn_prev*Tn_prev*(v_n_prev - v_overall_prev).dot(edge_normal),
0.5*nn_next*Tn_next*(v_n_next - v_overall_next).dot(edge_normal),
v_overall_next.x, v_overall_next.y
);
// if (TEST) printf("advect GPU %d : "
// "i %d iTri %d heatfluxout_contrib %1.14E \n"
// "nprev %1.14E nnext %1.14E\n"
// "Ti_prev next %1.14E %1.14E \nrel vxy %1.14E %1.14E ; %1.14E %1.14E\n"
// "edge_normal %1.14E %1.14E \n"
// "-------------------------\n",
// iVertex, i, iTri,
// 0.5*(n_prev*Ti_prev*(vxy_prev - v_overall_prev)
// + n_next*Ti_next*(vxy_next - v_overall_next)).dot(edge_normal),
// n_prev, n_next,
// Ti_prev, Ti_next, (vxy_prev-v_overall_prev).x, (vxy_prev - v_overall_prev).y,
// (vxy_next - v_overall_next).x, (vxy_next - v_overall_next).y,
// edge_normal.x, edge_normal.y);
//
endpt0 = endpt1;
n_prev = n_next;
nn_prev = nn_next;
vxy_prev = vxy_next;
v_n_prev = v_n_next;
v_overall_prev = v_overall_next;
Ti_prev = Ti_next;
Te_prev = Te_next;
Tn_prev = Tn_next;
};
NTrates NTplus;
NTplus.N = -totalmassflux_out.n;
NTplus.Nn = -totalmassflux_out.n_n;
NTplus.NeTe = -totalheatflux_out.Te;
NTplus.NiTi = -totalheatflux_out.Ti;
NTplus.NnTn = -totalheatflux_out.Tn;
//
// if (TEST) printf("\n%d : NTplus.NiTi %1.10E NTplus.N %1.10E Tsrc.i %1.10E nsrc.n %1.10E\n"
// "NTplus.NiTi/NTplus.N (avg temp of those coming/going) %1.10E\n"
// "NTplus.NiTi/N (ROC Ti) %1.10E\n"
// "NTplus.NiTi/NiTi (elasticity of T?) %1.10E \n"
// "NTplus.N/N (elasticity of N) %1.10E \n\n",
// CHOSEN, NTplus.NiTi, NTplus.N,
// Tsrc.Ti, nsrc.n,
// NTplus.NiTi/NTplus.N,
// NTplus.NiTi/(AreaMajor*nsrc.n),
// NTplus.NiTi/(AreaMajor*nsrc.n*Tsrc.Ti),
// NTplus.N/(AreaMajor*nsrc.n)
// );
memcpy(p_NTadditionrates + iVertex, &NTplus, sizeof(NTrates));
// What we need now:
// * Cope with non-domain vertex
p_div_v[iVertex] = Integrated_div_v / AreaMajor;
p_div_v_n[iVertex] = Integrated_div_v_n / AreaMajor;
p_Integrated_div_v_overall[iVertex] = Integrated_div_v_overall;
// if (iVertex == CHOSEN) printf(
// "Chosen: %d Integrated_div_v_n %1.9E p_div_v_n %1.9E \n",
// iVertex, Integrated_div_v_n, p_div_v_n[iVertex]);
// 3 divisions -- could speed up by creating 1.0/AreaMajor. Except it's bus time anyway.
} else {
p_div_v[iVertex] = 0.0;
p_div_v_n[iVertex] = 0.0;
p_Integrated_div_v_overall[iVertex] = 0.0;
};
}*/
__global__ void kernelAccumulateAdvectiveMassHeatRateNew(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
long * __restrict__ p_izNeigh_vert,
short * __restrict__ p_who_am_I_to_my_neighbours,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major, // use T vertex itself to infer what T to use.
v4 * __restrict__ p_vie_minor,
// f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
//T3 * __restrict__ p_T_minor, // may or may not overlap source: don't we only use from tris? so not overlap
// ShardModel * __restrict__ p_n_shard_n_major,
ShardModel * __restrict__ p_n_shard_major,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v,
// f64 * __restrict__ p_div_v_n, // write ion & electron routine only first; re-do as neutral.
f64 * __restrict__ p_Integrated_div_v_overall,
NTrates * __restrict__ p_store_flux
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 -- assume we still use this.
// only reused what, 3 times? 2*2 per major thread
// do we
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // +2*2 per major thread
// __shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // +4
// could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ T3 shared_T[threadsPerTileMajor]; // +3 ... = 15 total. Can run 128 threads.
// Should we just pre-average this on tris to make life easier for ourselves? No, because we also need T_opp.
__shared__ f64_12 shared_shards[threadsPerTileMajor];
// probably stick with loading in tri positions if we can.
// Probably can't manage to run this routine with 256 threads at a time. Can't fit 8 doubles / thread to shared.
// 24 doubles/thread to get 256 threads so we still need to chuck some out!
// 48K shared is default.
// scrap v_n, do neutral in sequence.
/////////////////////////////////////////////////////////////////////////////
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
v4 vie[2];
memcpy(&vie, p_vie_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(v4) * 2);
shared_vxy[2 * threadIdx.x] = vie[0].vxy;
shared_vxy[2 * threadIdx.x + 1] = vie[1].vxy;
//f64_vec3 v_n[2];
//memcpy(v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
//shared_v_n[2 * threadIdx.x] = v_n[0].xypart();
//shared_v_n[2 * threadIdx.x + 1] = v_n[1].xypart();
// memcpy(&(shared_T[2 * threadIdx.x]), p_T_upwind_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(T3) * 2);
shared_T[threadIdx.x] = p_T_src_major[iVertex];
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMajor = StartMajor + threadsPerTileMajor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
// Now that we have T from vertices, we'll need to define it on INS tri -- gulp. Just use avg of here and opp.
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
// We do not use for Outermost.
// Trouble is that sometimes a wall is moving and we want outermost to send US the mass.
// Solution: never shift vertices that lie outside a certain radius.
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[iVertex].n), MAXNEIGH * sizeof(f64));
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
// is this USED?
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 Te_next, Te_prev, Ti_next, Ti_prev, Tn_next, Tn_prev;
short inext, i = 0;
// Initial scenario: use triangle 0 & triangle 1. These face at neighbour 1. prev neigh = 0.
// Notice that for OUTERMOST we can make no such assumption --- the opposite holds.
long iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 0];
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
Ti_prev = shared_T[iNeigh - StartMajor].Ti;
Te_prev = shared_T[iNeigh - StartMajor].Te;
} else {
T3 Tload = p_T_src_major[iNeigh];
Ti_prev = Tload.Ti;
Te_prev = Tload.Te;
};
if (Ti_prev == 0.0) Ti_prev = shared_T[threadIdx.x].Ti;
if (Te_prev == 0.0) Te_prev = shared_T[threadIdx.x].Te;
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
//nvals nvls = shared_n_upwind[iTri - StartMinor];
//n_prev = nvls.n;
//nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
//v_n_prev = shared_v_n[iTri - StartMinor];
//Te_prev = shared_T[iTri - StartMinor].Te;
//Ti_prev = shared_T[iTri - StartMinor].Ti;
//Tn_prev = shared_T[iTri - StartMinor].Tn;
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
//nvals n_upwind = p_n_upwind_minor[iTri];
//n_prev = n_upwind.n;
//nn_prev = n_upwind.n_n;
vxy_prev = p_vie_minor[iTri].vxy;
//v_n_prev = p_v_n_minor[iTri].xypart();
//T3 Tuse = p_T_upwind_minor[iTri];
//Te_prev = Tuse.Te;
//Ti_prev = Tuse.Ti;
//Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
//v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
//v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
// We're going to need the position of our vertex?
nvals totalmassflux_out;
memset(&totalmassflux_out, 0, sizeof(nvals));
T3 totalheatflux_out;
memset(&totalheatflux_out, 0, sizeof(T3)); // we're only going to use ion and electron
f64 Integrated_div_v = 0.0;
// f64 Integrated_div_v_n = 0.0;
f64 Integrated_div_v_overall = 0.0;
f64 AreaMajor = 0.0;
f64 Ti_opp, Te_opp;
iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 1]; // neigh 0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
Ti_opp = shared_T[iNeigh - StartMajor].Ti;
Te_opp = shared_T[iNeigh - StartMajor].Te;
}
else {
T3 Tload = p_T_src_major[iNeigh];
Ti_opp = Tload.Ti;
Te_opp = Tload.Te;
};
if (Ti_opp == 0.0) Ti_opp = shared_T[threadIdx.x].Ti;
if (Te_opp == 0.0) Te_opp = shared_T[threadIdx.x].Te;
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0; // i,inext are the triangle indices
// Let's assume inext is the index of iNeigh but we should spit out lists to check this.
short inext2 = inext + 1; if (inext2 == tri_len) inext2 = 0;
long iNeighNext = p_izNeigh_vert[iVertex*MAXNEIGH + inext2]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeighNext >= StartMajor) && (iNeighNext < EndMajor)) {
Ti_next = shared_T[iNeighNext - StartMajor].Ti;
Te_next = shared_T[iNeighNext - StartMajor].Te;
}
else {
T3 Tload = p_T_src_major[iNeighNext]; // it's actually use not src.
Ti_next = Tload.Ti;
Te_next = Tload.Te;
};
if (Ti_next == 0.0) Ti_next = shared_T[threadIdx.x].Ti;
if (Te_next == 0.0) Te_next = shared_T[threadIdx.x].Te;
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
vxy_next = shared_vxy[iTri - StartMinor];
// We are going to need a separate attempt to get at T from vertices, to use in Simpson.
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
vxy_next = p_vie_minor[iTri].vxy;
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
//v_n_next = Clockwise_d*v_n_next; // v_n scrapped in this kernel (handled in the neutral routine below); value was never set
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
//v_n_next = Anticlockwise_d*v_n_next; // v_n scrapped in this kernel; value was never set
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
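// (Assumed reading: this is the Green's-theorem form of the polygon area, Area = closed
// integral of x dy, accumulated edge by edge as x_midpoint * delta_y
// = 0.5*(endpt0.x + endpt1.x)*edge_normal.x, which is exact for straight edges.)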
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
n_prev = shared_shards[threadIdx.x].n[i];
n_next = shared_shards[threadIdx.x].n[inext];
// This neighbour-flag load totally smashes up the runtime:
char neighflag = p_info_minor[iNeigh + BEGINNING_OF_CENTRAL].flag;
if ((neighflag == DOMAIN_VERTEX) ||
((info.flag == DOMAIN_VERTEX) && (neighflag == OUTERMOST)))
{
// Note: changes of cell area looking into ins / cath are not valid changes of area.
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v_overall += 0.5*(v_overall_prev + v_overall_next).dot(edge_normal); // Average outward velocity of edge...
// The area CAN be changing because of other vertices dragging on it.
// However we can ignore it as n,T should be locally constant near the rod anyway.
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
f64 prev_relv = (vxy_prev - v_overall_prev).dot(edge_normal);
f64 next_relv = (vxy_next - v_overall_next).dot(edge_normal);
if (iVertex == VERTCHOSEN) printf("%d : i = %d %d , contrib to integ div v overall %1.9E v_overall prev %1.9E %1.9E\n"
"prev next dot normal %1.9E %1.9E\n",
iVertex, i, iNeigh, 0.5*(v_overall_prev + v_overall_next).dot(edge_normal),
v_overall_prev.x, v_overall_prev.y, v_overall_prev.dot(edge_normal),
v_overall_next.dot(edge_normal));
// Insulator: If 1 or 2 of the vertices comes out at T=0 then what?
// Fill in with our own value. Upwind.
// But absolutely ensure we are not looking out of domain at vertex! And we are throwing away anything that flowed into OUTERMOST. Too bad about that, leave it.
// Without loading info for the vertex we look at, we do not know if it's out of domain. So we have to rely on vr=0 at insulator.
// upwind? :
if (prev_relv + next_relv > 0.0) {
// For now:
f64 Ti_prevavg = THIRD*(shared_T[threadIdx.x].Ti + Ti_prev + Ti_opp);
f64 Ti_nextavg = THIRD*(shared_T[threadIdx.x].Ti + Ti_next + Ti_opp);
f64 Te_prevavg = THIRD*(shared_T[threadIdx.x].Te + Te_prev + Te_opp);
f64 Te_nextavg = THIRD*(shared_T[threadIdx.x].Te + Te_next + Te_opp);
// LIMIT flux T to 2*ours.
if (Ti_prevavg > 2.0*shared_T[threadIdx.x].Ti) Ti_prevavg = 2.0*shared_T[threadIdx.x].Ti;
if (Ti_nextavg > 2.0*shared_T[threadIdx.x].Ti) Ti_nextavg = 2.0*shared_T[threadIdx.x].Ti;
if (Te_prevavg > 2.0*shared_T[threadIdx.x].Te) Te_prevavg = 2.0*shared_T[threadIdx.x].Te;
if (Te_nextavg > 2.0*shared_T[threadIdx.x].Te) Te_nextavg = 2.0*shared_T[threadIdx.x].Te;
f64 Ti_avg = 0.5*(shared_T[threadIdx.x].Ti + Ti_opp);
f64 Te_avg = 0.5*(shared_T[threadIdx.x].Te + Te_opp);
if (Ti_avg > 2.0*shared_T[threadIdx.x].Ti) Ti_avg = 2.0*shared_T[threadIdx.x].Ti;
if (Te_avg > 2.0*shared_T[threadIdx.x].Te) Te_avg = 2.0*shared_T[threadIdx.x].Te;
NTrates NTaddn;
memset(&NTaddn, 0, sizeof(NTrates));
NTaddn.N = 0.25*n_prev*prev_relv + 0.25*n_next*next_relv
+ 0.5*0.25*(n_prev + n_next)*(prev_relv + next_relv);
NTaddn.NiTi = 0.25*n_prev*Ti_prevavg*prev_relv + 0.25*n_next*Ti_nextavg*next_relv
+ 0.125*(n_prev + n_next)*Ti_avg*(prev_relv + next_relv);
NTaddn.NeTe = 0.25*n_prev*Te_prevavg*prev_relv + 0.25*n_next*Te_nextavg*next_relv
+ 0.125*(n_prev + n_next)*Te_avg*(prev_relv + next_relv);
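// (Assumed reading: a Simpson-style three-point quadrature along the edge --
// flux ~= (1/4)*f_prev + (1/2)*f_mid + (1/4)*f_next, with f_mid built from the averaged
// n, T and relv -- applied identically to N, NiTi and NeTe, using the T values capped at
// 2x our own T above.)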
totalmassflux_out.n += NTaddn.N;
totalheatflux_out.Ti += NTaddn.NiTi;
totalheatflux_out.Te += NTaddn.NeTe;
// Maybe there's a speedup we can use.
// Now save to downwind cell:
short who_am_I = p_who_am_I_to_my_neighbours[iVertex*MAXNEIGH + inext];
memcpy(&(p_store_flux[iNeigh*MAXNEIGH + who_am_I]), &(NTaddn), sizeof(NTrates));
// NOTE WE DID N*O*T ADD A MINUS.
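// (Assumption about the downstream gather: the flux is stored per (iNeigh, who_am_I) slot
// with its outflow sign intact -- an outflow from this upwind cell is an inflow to the
// downwind neighbour -- so presumably a later kernel adds p_store_flux into the neighbour's
// NT rates with a + sign, while this cell subtracts it via NTplus below.)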
if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_HEAT_FLAG)) {
printf("iVertex %d NTaddn.NiTi %1.9E n_prev %1.9E Ti_prevavg %1.9E prev_relv %1.9E \n"
"n_next %1.9E Ti_nextavg %1.9E next_relv %1.9E Ti_avg %1.9E \n"
"totalheatflux_out.Ti %1.9E i %d iNeigh %d who_am_I %d\n"
"Ti_ours %1.8E Ti_opp %1.8E \n"
"---------------------------------------------------\n",
iVertex, NTaddn.NiTi, n_prev, Ti_prevavg, prev_relv, n_next, Ti_nextavg, next_relv,
Ti_avg, totalheatflux_out.Ti, i, iNeigh, who_am_I,
shared_T[threadIdx.x].Ti, Ti_opp);
}
if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_MASS_FLAG)) {
printf("iVertex %d iNeigh %d massflux_out %1.9E NTaddn.N %1.9E \n"
"n_prev n_next %1.9E %1.9E prev_relv next_relv %1.9E %1.9E \n"
"vxy_prev %1.8E %1.8E vxy_next %1.9E %1.9E edge_normal %1.8E %1.8E \n"
"v_overall prev %1.8E %1.8E next %1.8E %1.8E \n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n",
iVertex, iNeigh, totalmassflux_out.n, NTaddn.N,
n_prev, n_next, prev_relv, next_relv,
vxy_prev.x, vxy_prev.y, vxy_next.x, vxy_next.y, edge_normal.x, edge_normal.y,
v_overall_prev.x, v_overall_prev.y, v_overall_next.x, v_overall_next.y);
};
}
else {
// downwind cell: collect flux later.
}
};
};
endpt0 = endpt1;
vxy_prev = vxy_next;
v_overall_prev = v_overall_next;
Ti_prev = Ti_opp;
Ti_opp = Ti_next;
Te_prev = Te_opp;
Te_opp = Te_next;
iNeigh = iNeighNext;
};
NTrates NTplus;
NTplus.N = -totalmassflux_out.n;
NTplus.Nn = 0.0; // -totalmassflux_out.n_n;
NTplus.NeTe = -totalheatflux_out.Te;
NTplus.NiTi = -totalheatflux_out.Ti;
NTplus.NnTn = 0.0; // -totalheatflux_out.Tn;
//
// if (TEST) printf("\n%d : NTplus.NiTi %1.10E NTplus.N %1.10E Tsrc.i %1.10E nsrc.n %1.10E\n"
// "NTplus.NiTi/NTplus.N (avg temp of those coming/going) %1.10E\n"
// "NTplus.NiTi/N (ROC Ti) %1.10E\n"
// "NTplus.NiTi/NiTi (elasticity of T?) %1.10E \n"
// "NTplus.N/N (elasticity of N) %1.10E \n\n",
// CHOSEN, NTplus.NiTi, NTplus.N,
// Tsrc.Ti, nsrc.n,
// NTplus.NiTi/NTplus.N,
// NTplus.NiTi/(AreaMajor*nsrc.n),
// NTplus.NiTi/(AreaMajor*nsrc.n*Tsrc.Ti),
// NTplus.N/(AreaMajor*nsrc.n)
// );
memcpy(p_NTadditionrates + iVertex, &NTplus, sizeof(NTrates));
// ROUTINE MUST BE CALLED FIRST - WE ZEROED OUT NEUTRAL DATA.
// What we need now:
// * Cope with non-domain vertex
p_div_v[iVertex] = Integrated_div_v / AreaMajor;
p_Integrated_div_v_overall[iVertex] = Integrated_div_v_overall;
}
else {
p_div_v[iVertex] = 0.0;
p_Integrated_div_v_overall[iVertex] = 0.0;
};
}
__global__ void kernelAccumulateNeutralAdvectiveMassHeatRateNew(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBCtri_verts,
long * __restrict__ p_izNeigh_vert,
short * __restrict__ p_who_am_I_to_my_neighbours,
nvals * __restrict__ p_n_src_major,
T3 * __restrict__ p_T_src_major, // use T vertex itself to infer what T to use.
f64_vec3 * __restrict__ p_v_n_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
ShardModel * __restrict__ p_n_shard_major,
NTrates * __restrict__ p_NTadditionrates,
f64 * __restrict__ p_div_v_n,
NTrates * __restrict__ p_store_flux
)
{
// Use the upwind density from tris together with v_tri.
// Seems to include a factor h
__shared__ f64_vec2 shared_pos[threadsPerTileMinor]; // 4 -- assume we still use this.
// only reused what, 3 times? 2*2 per major thread
// do we
__shared__ f64_vec2 shared_vxy[threadsPerTileMinor]; // +2*2 per major thread
// __shared__ f64_vec2 shared_v_n[threadsPerTileMinor]; // +4
// could split routine; no good reason not to.
//__shared__ f64_vec2 v_overall[threadsPerTileMinor];
// choosing just to load it ad hoc
__shared__ f64 shared_T[threadsPerTileMajor]; // +3 ... = 15 total. Can run 128 threads.
// Should we just pre-average this on tris to make life easier for ourselves? No, because we also need T_opp.
__shared__ f64_12 shared_shards[threadsPerTileMajor];
// probably stick with loading in tri positions if we can.
// Probably can't manage to run this routine with 256 threads at a time. Can't fit 8 doubles / thread to shared.
// 24 doubles/thread to get 256 threads so we still need to chuck some out!
// 48K shared is default.
// scrap v_n, do neutral in sequence.
/////////////////////////////////////////////////////////////////////////////
// Can't store rel v: we use div v of each v in what follows.
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
{
structural info[2];
memcpy(info, p_info_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(structural) * 2);
shared_pos[2 * threadIdx.x] = info[0].pos;
shared_pos[2 * threadIdx.x + 1] = info[1].pos;
f64_vec3 v_n[2];
memcpy(&v_n, p_v_n_minor + (threadsPerTileMinor*blockIdx.x + 2 * threadIdx.x), sizeof(f64_vec3) * 2);
shared_vxy[2 * threadIdx.x] = v_n[0].xypart();
shared_vxy[2 * threadIdx.x + 1] = v_n[1].xypart();
shared_T[threadIdx.x] = p_T_src_major[iVertex].Tn;
}
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const EndMinor = threadsPerTileMinor + StartMinor;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMajor = StartMajor + threadsPerTileMajor;
__syncthreads();
// What happens for abutting ins?
// T defined reasonably at insulator-crossing tri, A defined, v defined reasonably
// Now that we have T from vertices, we'll need to define it on INS tri -- gulp. Just use avg of here and opp.
structural info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX)) {
// see above -- no shift vertex outside certain radius
memcpy(&(shared_shards[threadIdx.x].n), &(p_n_shard_major[iVertex].n), MAXNEIGH * sizeof(f64));
// T3 Tsrc = p_T_src_major[iVertex]; // UNUSED!
nvals nsrc = p_n_src_major[iVertex];
// is this USED?
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
memcpy(izTri, p_izTri + iVertex * MAXNEIGH, sizeof(long) * MAXNEIGH);
memcpy(szPBC, p_szPBCtri_verts + iVertex*MAXNEIGH, sizeof(char)*MAXNEIGH);
// Now we are assuming what? Neigh 0 is below tri 0, so 0 1 are on neigh 0
// Check in debug. Looks true from comments.
short tri_len = info.neigh_len;
f64_vec2 edge_normal, endpt0, endpt1;
f64_vec2 vxy_prev, vxy_next;
f64_vec2 v_n_prev, v_n_next;
f64 n_next, n_prev, nn_next, nn_prev;
f64_vec2 v_overall_prev, v_overall_next;
f64 T_next, T_prev;
short inext, i = 0;
// Initial scenario: use triangle 0 & triangle 1. These face at neighbour 1. prev neigh = 0.
long iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 0];
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
T_prev = shared_T[iNeigh - StartMajor];
} else {
T_prev = p_T_src_major[iNeigh].Tn;
};
if (T_prev == 0.0) T_prev = shared_T[threadIdx.x];
long iTri = izTri[0];
v_overall_prev = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt0 = shared_pos[iTri - StartMinor];
//nvals nvls = shared_n_upwind[iTri - StartMinor];
//n_prev = nvls.n;
//nn_prev = nvls.n_n;
vxy_prev = shared_vxy[iTri - StartMinor];
//v_n_prev = shared_v_n[iTri - StartMinor];
//Te_prev = shared_T[iTri - StartMinor].Te;
//Ti_prev = shared_T[iTri - StartMinor].Ti;
//Tn_prev = shared_T[iTri - StartMinor].Tn;
}
else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt0 = p_info_minor[iTri].pos;
//nvals n_upwind = p_n_upwind_minor[iTri];
//n_prev = n_upwind.n;
//nn_prev = n_upwind.n_n;
vxy_prev = p_v_n_minor[iTri].xypart();
//v_n_prev = p_v_n_minor[iTri].xypart();
//T3 Tuse = p_T_upwind_minor[iTri];
//Te_prev = Tuse.Te;
//Ti_prev = Tuse.Ti;
//Tn_prev = Tuse.Tn;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
endpt0 = Clockwise_d*endpt0;
vxy_prev = Clockwise_d*vxy_prev;
//v_n_prev = Clockwise_d*v_n_prev;
v_overall_prev = Clockwise_d*v_overall_prev;
};
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
endpt0 = Anticlockwise_d*endpt0;
vxy_prev = Anticlockwise_d*vxy_prev;
//v_n_prev = Anticlockwise_d*v_n_prev;
v_overall_prev = Anticlockwise_d*v_overall_prev;
};
// We're going to need the position of our vertex?

f64 totalmassflux_out;
memset(&totalmassflux_out, 0, sizeof(f64));
f64 totalheatflux_out;
memset(&totalheatflux_out, 0, sizeof(f64));
f64 Integrated_div_v = 0.0;
f64 AreaMajor = 0.0;
f64 T_opp;
iNeigh = p_izNeigh_vert[iVertex*MAXNEIGH + 1]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeigh >= StartMajor) && (iNeigh < EndMajor)) {
T_opp = shared_T[iNeigh - StartMajor];
} else {
T_opp = p_T_src_major[iNeigh].Tn;
};
if (T_opp == 0.0) T_opp = shared_T[threadIdx.x];
#pragma unroll MAXNEIGH
for (i = 0; i < tri_len; i++)
{
inext = i + 1; if (inext == tri_len) inext = 0; // i,inext are the triangle indices
// Let's assume inext is the index of iNeigh but we should spit out lists to check this.
short inext2 = inext + 1; if (inext2 == tri_len) inext2 = 0;
long iNeighNext = p_izNeigh_vert[iVertex*MAXNEIGH + inext2]; // neigh0 is between 0 and 1, neigh -1 is before tri 0.
if ((iNeighNext >= StartMajor) && (iNeighNext < EndMajor)) {
T_next = shared_T[iNeighNext - StartMajor];
}
else {
T_next = p_T_src_major[iNeighNext].Tn; // it's actually use not src.
};
if (T_next == 0.0) T_next = shared_T[threadIdx.x];
long iTri = izTri[inext];
f64_vec2 v_overall_next = p_v_overall_minor[iTri];
if ((iTri >= StartMinor) && (iTri < EndMinor)) {
endpt1 = shared_pos[iTri - StartMinor];
vxy_next = shared_vxy[iTri - StartMinor];
// Te_next = shared_T[iTri - StartMinor].Te;
// Ti_next = shared_T[iTri - StartMinor].Ti;
// Tn_next = shared_T[iTri - StartMinor].Tn;
// We are going to need a separate attempt to get at T from vertices, to use in Simpson.
} else {
// The volume of random bus accesses means that we would have been better off making a separate
// neutral routine even though it looks efficient with the shared loading. nvm
endpt1 = p_info_minor[iTri].pos;
vxy_next = p_v_n_minor[iTri].xypart();
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
endpt1 = Clockwise_d*endpt1;
vxy_next = Clockwise_d*vxy_next;
v_overall_next = Clockwise_d*v_overall_next;
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
endpt1 = Anticlockwise_d*endpt1;
vxy_next = Anticlockwise_d*vxy_next;
v_overall_next = Anticlockwise_d*v_overall_next;
};
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMajor += 0.5*edge_normal.x*(endpt0.x + endpt1.x);
// if (iVertex == CHOSEN) printf("GPU %d : AreaMajor %1.9E edge_nml.x %1.6E endpt0.x %1.6E endpt1.x %1.6E \n",
// iVertex,
// AreaMajor, edge_normal.x, endpt0.x, endpt1.x);
n_prev = shared_shards[threadIdx.x].n[i];
n_next = shared_shards[threadIdx.x].n[inext];
// This neighbour-flag load totally smashes up the runtime:
char neighflag = p_info_minor[iNeigh + BEGINNING_OF_CENTRAL].flag;
if ((neighflag == DOMAIN_VERTEX) ||
((info.flag == DOMAIN_VERTEX) && (neighflag == OUTERMOST)))
{
if ((n_prev != 0.0) && (n_next != 0.0)) {
Integrated_div_v += 0.5*(vxy_prev + vxy_next).dot(edge_normal);
f64 prev_relv = (vxy_prev - v_overall_prev).dot(edge_normal);
f64 next_relv = (vxy_next - v_overall_next).dot(edge_normal);
// Insulator: If 1 or 2 of the vertices comes out at T=0 then what?
// Fill in with our own value. Upwind.
// But absolutely ensure we are not looking out of domain at vertex! And we are throwing away anything that flowed into OUTERMOST. Too bad about that, leave it.
// Without loading info for the vertex we look at, we do not know if it's out of domain. So we have to rely on vr=0 at insulator.
// upwind? :
if (prev_relv + next_relv > 0.0) {
// Note: you are not upwind for both neutrals & ions necessarily.
// For now:
f64 T_prevavg = THIRD*(shared_T[threadIdx.x] + T_prev + T_opp);
f64 T_nextavg = THIRD*(shared_T[threadIdx.x] + T_next + T_opp);
// LIMIT flux T to 2*ours.
if (T_prevavg > 2.0*shared_T[threadIdx.x]) T_prevavg = 2.0*shared_T[threadIdx.x];
if (T_nextavg > 2.0*shared_T[threadIdx.x]) T_nextavg = 2.0*shared_T[threadIdx.x];
f64 T_avg = 0.5*(shared_T[threadIdx.x] + T_opp);
if (T_avg > 2.0*shared_T[threadIdx.x]) T_avg = 2.0*shared_T[threadIdx.x];
f64 NTaddnN, NTaddnNT;
NTaddnN = 0.25*n_prev*prev_relv + 0.25*n_next*next_relv
+ 0.5*0.25*(n_prev + n_next)*(prev_relv + next_relv);
NTaddnNT = 0.25*n_prev*T_prevavg*prev_relv + 0.25*n_next*T_nextavg*next_relv
+ 0.125*(n_prev + n_next)*T_avg*(prev_relv + next_relv);
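// (Same three-point 1/4, 1/2, 1/4 edge quadrature as in the ion/electron routine above,
// applied here to Nn and NnTn only.)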
totalmassflux_out += NTaddnN;
totalheatflux_out += NTaddnNT;
// Maybe there's a speedup we can use.
// Now save to downwind cell:
short who_am_I = p_who_am_I_to_my_neighbours[iVertex*MAXNEIGH + inext];
p_store_flux[iNeigh*MAXNEIGH + who_am_I].Nn = NTaddnN;
p_store_flux[iNeigh*MAXNEIGH + who_am_I].NnTn = NTaddnNT;
// NOTE WE DID N*O*T ADD A MINUS.
// if (((iNeigh == VERTCHOSEN) || (iVertex == VERTCHOSEN)) && (TEST_ADV_MASS_FLAG)) {
// printf("iVertex %d iNeigh %d massflux_out %1.9E NTaddn.Nn %1.9E \n"
// "nn_prev nn_next %1.9E %1.9E prev_relv next_relv %1.9E %1.9E \n"
// "vxy_prev %1.8E %1.8E vxy_next %1.9E %1.9E edge_normal %1.8E %1.8E \n"
// "v_overall prev %1.8E %1.8E next %1.8E %1.8E who_am_I %d \n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n",
// iVertex, iNeigh, totalmassflux_out, NTaddnN,
// n_prev, n_next, prev_relv, next_relv,
// vxy_prev.x, vxy_prev.y, vxy_next.x, vxy_next.y, edge_normal.x, edge_normal.y,
// v_overall_prev.x, v_overall_prev.y, v_overall_next.x, v_overall_next.y, who_am_I);
// };
}
else {
// downwind cell: collect flux later.
};
};
};
endpt0 = endpt1;
vxy_prev = vxy_next;
v_overall_prev = v_overall_next;
T_prev = T_opp;
T_opp = T_next;
iNeigh = iNeighNext;
};
p_NTadditionrates[iVertex].Nn = -totalmassflux_out;
p_NTadditionrates[iVertex].NnTn = -totalheatflux_out;
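// NTadditionrates accumulates d(Nn)/dt and d(NnTn)/dt for this cell; outflow reduces them,
// hence the minus signs above. The fluxes written to p_store_flux are left with positive sign
// and are presumably collected on the downwind neighbour's side later.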
p_div_v_n[iVertex] = Integrated_div_v / AreaMajor;
}
else {
p_div_v_n[iVertex] = 0.0;
};
}
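// ==============================================================================================
// kernelCreateLinearRelationship
// Backs out, for each minor cell, the affine relationship
//     Azdot_(k+1) ~= Azdot0 + gamma * Lap(Az)
// from the Ohm's-law solve: it cancels the Lap(Az) contribution that PopOhms already folded
// into v0.viz / v0.vez, re-applies Ez_strength through the stored sigma coefficients, and
// stores Azdot0 and gamma for the subsequent linear solve for Az.
// A minimal host-side launch sketch (grid-size and argument names are illustrative only):
//   kernelCreateLinearRelationship<<<numBlocksMinor, threadsPerTileMinor>>>(
//       hsub, p_info_minor, p_OhmsCoeffs, p_v0, p_LapAz, p_n_minor, p_denom_e,
//       p_denom_i, p_coeff_of_vez_upon_viz, p_beta_ie_z, p_AAdot_intermediate,
//       p_AreaMinor, p_Azdot0, p_gamma);
// ==============================================================================================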
__global__ void kernelCreateLinearRelationship(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma
)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
if (((TESTTRI)) && (0)) printf("\nv0.vez before remove Lapcontrib %1.14E \n", v0.vez);
v0.viz += 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -0.5*eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
if (((TESTTRI)) && (0)) printf("\n##############\nviz before remove LapAzcontrib %1.14E Lapcontrib %1.14E \n\n",
v0.viz - 0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i,
-0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i
);
// Inadequate because we need to take account of the effect of Lap Az on vez0 via viz0.
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine even more complicated.
if (((TESTTRI)) && (0)) printf("own part of effect (we cancel): %1.14E \n"
"via viz (we cancel): coeff %1.14E vizeffect %1.14E\n",
0.5*eoverm*h_use*h_use* c* Lap_Az_used / denom_e,
coeff_of_vez_upon_viz,
-0.5*qoverM*h_use*h_use* c* Lap_Az_used / denom_i);
if (((TESTTRI)) && (0)) printf("v0.vez after remove Lapcontrib %1.14E \n", v0.vez);
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
if (((TESTTRI)) && (0)) printf("vez_1 with Ezcontrib %1.14E sigma_e_zz %1.14E Ez %1.14E vizeffect %1.14E \n", vez_1,
Ohms.sigma_e_zz, Ez_strength, Ohms.sigma_i_zz * Ez_strength);
// Cancelled Lap contrib from vez1 here.
// Be sure we know that makes sense. Is that what we missed on CPU?
nvals n_use = p_n_minor[iMinor];
// AAzdot_k.Azdot +=
// h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)); // INTERMEDIATE
// p_AAdot_intermediate[iMinor] = AAzdot_k; // not k any more
#ifdef MIDPT_A
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot
- 0.5*h_use*c*c*Lap_Az_used // cancel out half what PopOhms did!
// + h_use * ROCAzdot_antiadvect[iMinor] // we did this as part of PopOhms.
// + h_use *c*2.0*PI* q*n_use.n*(v_src.viz - v_src.vez) // we did this as part of PopOhms
+ h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// HALVED:
f64 viz0_coeff_on_Lap_Az = -0.25*h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = 0.25* h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
#else
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot
- h_use*c*c*Lap_Az_used // cancel out what PopOhms did!
// + h_use * ROCAzdot_antiadvect[iMinor] // we did this as part of PopOhms.
// + h_use *c*2.0*PI* q*n_use.n*(v_src.viz - v_src.vez) // we did this as part of PopOhms
+ h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1);
f64 viz0_coeff_on_Lap_Az = -0.5*h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = 0.5* h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
#endif
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
if (((TESTTRI)) && (0)) printf("vez0_coeff_on_Lap undivided %1.14E coeff_viz_on_vez %1.14E viz0_coeff %1.14E denom_e %1.14E\n",
0.5* h_use*h_use*eoverm*c,
coeff_of_vez_upon_viz,
viz0_coeff_on_Lap_Az,
denom_e
);
#ifdef MIDPT_A
p_gamma[iMinor] = h_use*c*c*(0.5 + 0.5*FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
#else
p_gamma[iMinor] = h_use*c*c*(1.0 + 0.5*FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
#endif
// This represents the effect on Azdot of LapAz.
// Did we get this wrong for CPU also?
if (((TESTTRI)) && (0)) {
printf("kernelCLR %d: Azdot_intermed %1.14E Lap_Az_used %1.14E Lapcontrib cancel %1.14E Azdot0 %1.14E\n",
CHOSEN, p_AAdot_intermediate[iMinor].Azdot, Lap_Az_used,
-h_use*c*c*Lap_Az_used,
p_Azdot0[iMinor]);
printf("Jcontrib1 %1.14E viz1 %1.14E vez1 %1.14E\n",
h_use *c*2.0*M_PI* q*n_use.n*(viz_1 - vez_1),
viz_1, vez_1);
printf("gamma %1.14E components: n %1.14E viz0coeff %1.14E vez0coeff %1.14E",
p_gamma[iMinor],
n_use.n, viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az);
}
}
else {
// In PopOhms:
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]
// NO: + 4.0*PI*Jz);
// p_AAdot_intermediate[iMinor] = temp; //
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
// We should find a way to set these to exactly what we need for it to work,
// at TriMesh::Initialise, and then propagate them through the Invoke function.
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
#ifdef MIDPT_A
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot - h_use*0.5*c*c*Lap_Az_used
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use*0.5 * c*c;
#else
p_Azdot0[iMinor] = p_AAdot_intermediate[iMinor].Azdot - h_use*c*c*Lap_Az_used
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
#endif
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
if (((TESTTRI)) && (0)) printf("kernelCLR %d: Azdot_intermed %1.14E Lap_Az_used %1.14E Azdot0 %1.14E\n",
CHOSEN, p_AAdot_intermediate[iMinor].Azdot, Lap_Az_used, p_Azdot0[iMinor]);
// Note that for frills these will simply not be used.
};
}
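// kernelCreateLinearRelationshipBwd: backward-Euler counterpart of the kernel above.
// The coefficients use the full step h (no 0.5 factor), Azdot0 is built from AAdot_k
// plus the advective rate-of-change term, and
//     gamma = h c^2 (1 + (4 pi / c) q n (viz0_coeff - vez0_coeff)).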
__global__ void kernelCreateLinearRelationshipBwd(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ ROCAzdotduetoAdvection
)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
v0.viz += qoverM*h_use*h_use* c* Lap_Az_used / denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine even more complicated.
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
nvals n_use = p_n_minor[iMinor];
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use * ROCAzdotduetoAdvection[iMinor] // our prediction contains this
+ h_use *c*4.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// ROCAzdot_antiadvect --- we need this to be in there only
// on cycles that we do advection
// So do the addition in here.
f64 viz0_coeff_on_Lap_Az = h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
p_gamma[iMinor] = h_use*c*c*(1.0 + FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
} else {
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot + h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
};
}
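// kernelCreateLinearRelationshipBwd_noadvect: same as kernelCreateLinearRelationshipBwd
// but with the ROCAzdotduetoAdvection term omitted, for cycles on which no advection is done.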
__global__ void kernelCreateLinearRelationshipBwd_noadvect(
f64 const h_use,
structural * __restrict__ p_info,
OhmsCoeffs* __restrict__ p_Ohms,
v4 * __restrict__ p_v0,
f64 * __restrict__ p_Lap_Az_use,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_coeff_of_vez_upon_viz,
f64 * __restrict__ p_beta_ie_z,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma
)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 const Lap_Az_used = p_Lap_Az_use[iMinor];
structural const info = p_info[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 v0 = p_v0[iMinor];
// Cancel the part that was added in order to get at Ez_strength:
f64 denom_e = p_denom_e[iMinor];
f64 denom_i = p_denom_i[iMinor];
v0.viz += qoverM*h_use*h_use* c* Lap_Az_used/denom_i; // adaptation for this.
f64 coeff_of_vez_upon_viz = p_coeff_of_vez_upon_viz[iMinor];
f64 cancel_from_vez = -eoverm*h_use*h_use* c* Lap_Az_used / denom_e
+ coeff_of_vez_upon_viz * qoverM*h_use*h_use* c* Lap_Az_used / denom_i;
v0.vez += cancel_from_vez;
f64 beta_ie_z = p_beta_ie_z[iMinor];
v0.viz += beta_ie_z * cancel_from_vez;
// We see now that re-jigging things is absolutely not what we should have done.
// It will make the most complicated overspilling routine even more complicated.
OhmsCoeffs Ohms = p_Ohms[iMinor];
f64 vez_1 = v0.vez + Ohms.sigma_e_zz * Ez_strength;
f64 viz_1 = v0.viz + Ohms.sigma_i_zz * Ez_strength;
nvals n_use = p_n_minor[iMinor];
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use *c*4.0*M_PI* q*n_use.n*(viz_1 - vez_1);
// if ((iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN) || (iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN2))
// printf("%d : AAdot_k.Azdot %1.10E n_use.n %1.9E viz1 %1.9E vez1 %1.9E\n",
// iMinor, p_AAdot_k[iMinor].Azdot, n_use.n, viz_1, vez_1);
// ROCAzdot_antiadvect --- we need this to be in there only
// on cycles that we do advection
// So do the addition in here.
// THIS WAS IN ERROR previously; note that the sign of viz0_coeff_on_Lap_Az below
// differs from kernelCreateLinearRelationshipBwd above.
f64 viz0_coeff_on_Lap_Az = -h_use*h_use*qoverM*c / denom_i;
f64 vez0_coeff_on_Lap_Az = h_use*h_use*eoverm*c / denom_e
+ coeff_of_vez_upon_viz*viz0_coeff_on_Lap_Az;
viz0_coeff_on_Lap_Az += beta_ie_z*vez0_coeff_on_Lap_Az;
p_gamma[iMinor] = h_use*c*c*(1.0 + FOURPI_OVER_C * q*n_use.n*
(viz0_coeff_on_Lap_Az - vez0_coeff_on_Lap_Az));
} else {
// We need to do the same sort of thing here as in CalcVelocityAzdot :
f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
{
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
}
p_Azdot0[iMinor] = p_AAdot_k[iMinor].Azdot
+ h_use*c*FOUR_PI*Jz;
p_gamma[iMinor] = h_use * c*c;
if ((info.flag == INNER_FRILL) || (info.flag == OUTER_FRILL))
{
p_Azdot0[iMinor] = 0.0; // difference found? But we did set = 0 on CPU.
p_gamma[iMinor] = 0.0;
}
};
}
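// The block below is a legacy version of kernelPopulateOhmsLaw, retained in comments
// for reference only; it appears to have been superseded by the backward routines above.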
/*
__global__ void kernelPopulateOhmsLaw(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
nvals * __restrict__ p_one_over_n,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ ROCAzdotduetoAdvection,
// Now going to need to go through and see this set 0 or sensible every time.
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave,
bool const bUse_dest_n_for_Iz,
nvals * __restrict__ p_n_dest_minor) // for turning on save of these denom_ quantities
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, ROCAzdot_antiadvect, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n));
// p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y/(AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart()/ (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
if (((TESTTRI))) printf("\nGPU %d vxyk %1.10E %1.10E aMAR_i.y %1.10E MAR.y %1.10E 1/n %1.10E Area %1.10E\n", iMinor,
v0.vxy.x, v0.vxy.y,
h_use * (m_i*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.y,
p_one_over_n[iMinor].n,
AreaMinor);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor); // UM WHY WAS THIS NEGATIVE
// + !!!!
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (((TESTTRI))) printf("\nGPU %d a:MAR_e %1.10E %1.10E MAR.y %1.10E 1/n %1.10E Area %1.10E\n", iMinor,
h_use * (m_e*MAR.x/ (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y/ (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.y,
p_one_over_n[iMinor].n, AreaMinor);
// if (((TESTTRI)))
// printf("GPU %d WITH MAR v0.vxy %1.14E %1.14E\n", CHOSEN, v0.vxy.x, v0.vxy.y);
// printf("GPU %d data_k %1.10E %1.10E MAR %1.10E %1.10E\n", CHOSEN, vie_k.vxy.x, vie_k.vxy.y,
// MAR.x, MAR.y);
// printf("GPU %d n %1.12E AreaMinor %1.12E \n", CHOSEN, n_use.n, AreaMinor);
// }
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
}
}
vn0.x += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.x - vie_k.vxy.x)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.x - vie_k.vxy.x);
vn0.y += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.y - vie_k.vxy.y)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.y - vie_k.vxy.y);
vn0.z += -0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n)*(v_n_src.z - vie_k.vez)
- 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n)*(v_n_src.z - vie_k.viz);
denom = 1.0 + h_use * 0.5*M_e_over_en* (cross_section_times_thermal_en*n_use.n)
+ 0.5*h_use*M_i_over_in* (cross_section_times_thermal_in*n_use.n);
vn0 /= denom; // It is now the REDUCED value
if (((TESTTRI)))
printf("GPU %d vn0 %1.9E %1.9E %1.9E denom %1.14E \n", CHOSEN, vn0.x, vn0.y, vn0.z, denom);
ohm.beta_ne = 0.5*h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = 0.5*h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
f64 ROCAzdot_antiadvect = ROCAzdotduetoAdvection[iMinor];
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
// %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
// Here is where we should be using v_use:
// We do midpoint instead? Why not? Thus allowing us not to load v_use.
// %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
v0.vxy +=
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x]
- (h_use / (2.0*(m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(vie_k.vxy - v_n_src.xypart() - vn0.xypart());
if (((TESTTRI))) printf("GPU %d vzgradAz contrib_k %1.10E %1.10E vez_k viz_k %1.9E %1.9E gradAz %1.9E %1.9E\n", iMinor,
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x].x,
-h_use * (q / (2.0*c*(m_i + m_e)))*(vie_k.vez - vie_k.viz)*grad_Az[threadIdx.x].y, vie_k.vez, vie_k.viz,
grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y);
denom = 1.0 + (h_use / (2.0*(m_i + m_e)))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*(1.0 - ohm.beta_ne - ohm.beta_ni);
v0.vxy /= denom;
//
if (((TESTTRI)))
printf("GPU %d v0.vxy %1.14E %1.14E denom %1.14E \n"
"nu_in_MT %1.14E nu_en_MT %1.14E beta_ne %1.14E \n",
CHOSEN, v0.vxy.x, v0.vxy.y, denom,
cross_section_times_thermal_in*n_use.n_n, cross_section_times_thermal_en*n_use.n_n, ohm.beta_ne);
ohm.beta_xy_z = (h_use * q / (2.0*c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x];
/////////////////////////////////////////////////////////////////////////////// midpoint
// if (((TESTTRI))) printf("ohm.beta_xy_z %1.14E \n", ohm.beta_xy_z);
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// if (nu_ei_effective != nu_ei_effective) printf("nu_ei NaN: omega %1.8E %1.8E nu_eHeart %1.8E nu_eiBar %1.8E\n",
// omega[threadIdx.x].x, omega[threadIdx.x].y, nu_eHeart, nu_eiBar);
AAdot AAzdot_k = p_AAdot_src[iMinor];
//if ((iPass == 0) || (bFeint == false))
{
// if (((TESTTRI)) && (0)) printf("viz0: %1.14E\n", v0.viz);
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz); // nonzero
v0.viz +=
-0.5*h_use*qoverMc*(2.0*AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz
+ FOURPI_OVER_C*0.5 * q*n_use.n*(vie_k.viz - vie_k.vez)))
- 0.5*h_use*qoverMc*(vie_k.vxy + v0.vxy).dot(grad_Az[threadIdx.x]);
if (((TESTTRI))) {
printf("viz0 I: %1.14E contribs:\n", v0.viz);
printf(" Azdotk %1.14E \n ROC %1.14E\n JviaAzdot %1.14E\n lorenzmag %1.14E\n",
-0.5*h_use*qoverMc*(2.0*AAzdot_k.Azdot),
-0.5*h_use*qoverMc*h_use * ROCAzdot_antiadvect,
-0.5*h_use*qoverMc*h_use * c*c*(FOURPI_OVER_C*0.5 * q*n_use.n*(vie_k.viz - vie_k.vez)),
-0.5*h_use*qoverMc*(vie_k.vxy + v0.vxy).dot(grad_Az[threadIdx.x])
);
printf("due to LapAz: %1.14E = %1.6E %1.6E %1.6E %1.6E\n",
-0.5*h_use*qoverMc*h_use *c*c*LapAz,
h_use*h_use*0.5,
qoverMc,
c*c,
LapAz); // == 0
};
}
//else {
// viz0 = data_k.viz
// - h_use * MomAddRate.ion.z / (data_use.n*AreaMinor)
// - 0.5*h_use*qoverMc*(2.0*data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(TWOPIoverc * q*data_use.n*(data_k.viz - data_k.vez)))
// - 0.5*h_use*qoverMc*(data_k.vxy + vxy0).dot(grad_Az[threadIdx.x]);
// };
//
// Still omega_ce . Check formulas.
//
v0.viz +=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
// if (((TESTTRI))) printf("viz0 with thermal force %1.14E \n", v0.viz);
v0.viz += -h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(vie_k.viz - v_n_src.z - vn0.z) // THIS DOESN'T LOOK RIGHT
+ h_use * 0.5*(moverM)*nu_ei_effective*(vie_k.vez - vie_k.viz);
if (((TESTTRI))) printf("viz0 contrib i-n %1.14E contrib e-i %1.14E\nviz0 %1.14E\n",
-h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(vie_k.viz - v_n_src.z - vn0.z),
h_use * 0.5*(moverM)*nu_ei_effective*(vie_k.vez - vie_k.viz), v0.viz
);
denom = 1.0 + h_use * h_use*M_PI*qoverM*q*n_use.n + h_use * 0.5*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni) + h_use * 0.5*moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*M_PI*qoverM*q*n_use.n
+ 0.5*h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * 0.5*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * 0.5*moverM*nu_ei_effective) / denom;
if (((TESTTRI2))) printf("vez0 %1.14E \n", v0.vez);
v0.vez +=
h_use * 0.5*qovermc*(2.0*AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect
+ h_use * c*c*(LapAz
+ 0.5*FOURPI_Q_OVER_C*n_use.n*(vie_k.viz + v0.viz - vie_k.vez))) // ?????????????????
+ 0.5*h_use*qovermc*(vie_k.vxy + v0.vxy + v0.viz * ohm.beta_xy_z).dot(grad_Az[threadIdx.x]);
if (((TESTTRI2)))
printf(" %d v0.vez %1.14E Azdotctb %1.14E antiadvect %1.14E LapAzctb %1.14E \n"
"%d JviaAzdot %1.14E lorenzmag %1.14E \n",
iMinor, v0.vez, h_use * 0.5*qovermc*2.0*AAzdot_k.Azdot,
h_use * 0.5*qovermc*h_use * ROCAzdot_antiadvect,
h_use * 0.5*qovermc*h_use * c*c*LapAz,
iMinor,
h_use * 0.5*qovermc*h_use * c*c* 0.5*FOURPI_Q_OVER_C*n_use.n*(vie_k.viz + v0.viz - vie_k.vez),
0.5*h_use*qovermc*(vie_k.vxy + v0.vxy + v0.viz * ohm.beta_xy_z).dot(grad_Az[threadIdx.x])
);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * 0.5*qovermc*h_use * c*c*0.5*FOURPI_Q_OVER_C*n_use.n
+ 0.5*h_use*qovermc*( ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])+ qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
if (((TESTTRI2)))
printf("%d v0.vez TF contrib : %1.14E nu_eiBar %1.14E nu_eHeart %1.14E \n"
"%d omega %1.14E %1.14E %1.14E\n",iMinor,
-1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)),
nu_eiBar, nu_eHeart, iMinor,
omega[threadIdx.x].x, omega[threadIdx.x].y, qovermc*BZ_CONSTANT);
// could store this from above and put opposite -- dividing by m_e instead of m_i
v0.vez += -0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vie_k.vez - v_n_src.z - vn0.z - ohm.beta_ni * v0.viz)
- 0.5*h_use*nu_ei_effective*(vie_k.vez - vie_k.viz - v0.viz);
// implies:
effect_of_viz0_on_vez0 +=
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + 0.5*h_use*nu_ei_effective;
if (
//(iMinor == 11761 + BEGINNING_OF_CENTRAL) ||
//(iMinor == 11616 + BEGINNING_OF_CENTRAL) ||
//(iMinor == 11762 + BEGINNING_OF_CENTRAL) ||
((TESTTRI2)) )
{
printf("%d cross_section_times_thermal_en %1.10E n_use.n_n %1.10E vezk %1.10E vez0 %1.10E Mnoverne %1.10E nu_ei_effective %1.10E \n",
iMinor, cross_section_times_thermal_en, n_use.n_n,
vie_k.vez, v0.vez,
M_n_over_ne, nu_ei_effective);
}
if (((TESTTRI2)))
printf("v0.vez contribs e-n e-i: %1.14E %1.14E v0.viz %1.14E\n",
-0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vie_k.vez - v_n_src.z - vn0.z - ohm.beta_ni * v0.viz),
- 0.5*h_use*nu_ei_effective*(vie_k.vez - vie_k.viz - v0.viz),
v0.viz);
denom = 1.0 + (h_use*h_use*M_PI*q*eoverm*n_use.n
+ 0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ 0.5*h_use*nu_ei_effective*(1.0 - beta_ie_z);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use * 0.5*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ 0.5*h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
// if (((TESTTRI)1) || ((TESTTRI)2))
//printf("GPU %d vez0 before divide %1.14E \n", iMinor, v0.vez);
//
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
//if (v0.vez != v0.vez) {
// printf("iMinor %d v0.vez %1.10E ohm.sigma_e %1.10E denom %1.10E \n"
// "%1.10E %1.10E %1.10E %1.10E n %1.10E Te %1.10E\n" ,
// iMinor, v0.vez, ohm.sigma_e_zz, denom,
// h_use*h_use*M_PI*q*eoverm*n_use.n,//*(1.0 - beta_ie_z) // this was ok
// 0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*(1.0 - beta_ie_z), // this was not ok
// 0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z),
// 0.5*h_use*nu_ei_effective,//*(1.0 - beta_ie_z) // this was not ok -- even though n,T come out ok
// n_use.n, T.Te);
//}
if ( ((TESTTRI2)))
printf("GPU %d v0.vez %1.14E denom %1.14E \n"
"ohm.sigma_e_zz %1.14E n_use %1.10E nn %1.10E Te %1.10E\n"
"%d %1.12E %1.12E %1.12E %1.12E %1.12E \n"
"%d denom %1.14E : %1.12E %1.12E %1.12E %1.12E\n",
iMinor, v0.vez, denom,
ohm.sigma_e_zz,
n_use.n,n_use.n_n, T.Te, iMinor, -h_use * eoverm,
h_use * h_use*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz,
h_use * 0.5*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz,
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz,
0.5*h_use*nu_ei_effective*ohm.sigma_i_zz,
iMinor, denom,
(h_use*h_use*M_PI*q*eoverm*n_use.n)*(1.0 - beta_ie_z),
(0.5*h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z),
0.5*h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z),
0.5*h_use*nu_ei_effective*(1.0 - beta_ie_z)
);
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
} else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
// Think maybe we should get rid of most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
if (bUse_dest_n_for_Iz) {
f64 ndest = p_n_dest_minor[iMinor].n;
Iz[threadIdx.x] = q*AreaMinor*ndest*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*ndest*(ohm.sigma_i_zz - ohm.sigma_e_zz);
if (((TESTTRI2))) {
printf( "ndest %1.12E sigma_zz/Area %1.12E AreaMinor %1.12E\n\n",
ndest, q*ndest*(ohm.sigma_i_zz - ohm.sigma_e_zz), AreaMinor);
}
} else {
// On intermediate substeps, the interpolated n that applies halfway through the substep is a reasonable choice...
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
// I'm sure we can do better on this. But we also might prefer to excise a lot of this calc from the subcycle.
if (((TESTTRI2))) {
printf("n_use.n %1.12E sigma_zz/Area %1.12E AreaMinor %1.12E\n\n",
n_use.n, q*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz), AreaMinor);
}
}
// Totally need to be skipping the load of an extra n.
// ^^ old remark.
// But it's too messy never loading it. t_half means changing all the
// Iz formula to involve v_k. Don't want that.
// if (blockIdx.x == 340) printf("%d: %1.14E %1.14E \n",
// iMinor, q*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz), sigma_zz[threadIdx.x]);
// On iPass == 0, we need to do the accumulate.
// p_Azdot_intermediate[iMinor] = Azdot_k
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*n_use.n*(data_k.viz - data_k.vez)); // INTERMEDIATE
//if ((0) && ((TESTTRI))) printf("******************* AAzdot_k.Azdot %1.14E \n", AAzdot_k.Azdot);
AAzdot_k.Azdot +=
h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz +
0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)); // INTERMEDIATE
p_AAdot_intermediate[iMinor] = AAzdot_k; // not k any more
//Iz_k[threadIdx.x] = q*n_use.n*(vie_k.viz - vie_k.vez)*AreaMinor;
//if ((0) && ((TESTTRI))) {
// printf("\n!!! kernelPopOhms GPU %d: \n******* Azdot_intermediate %1.14E vie_k %1.14E %1.14E\n"
// "antiadvect %1.10E Lapcontrib %1.13E Jcontrib_k %1.14E\n\n",
// CHOSEN, p_AAdot_intermediate[iMinor].Azdot,
// vie_k.viz, vie_k.vez,
// h_use * ROCAzdot_antiadvect,
// h_use * c*c*LapAz,
// h_use * c*c*0.5*FOURPI_OVER_C * q*n_use.n*(vie_k.viz - vie_k.vez)
// );
//}
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(LapAz +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
} else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
{
p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
} else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
AAdot temp = p_AAdot_src[iMinor];
temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
p_AAdot_intermediate[iMinor] = temp; //
};
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + s - 1];
Iz[threadIdx.x] += Iz[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
// Wish to make the Jz contribs to Azdot on each side of the ins exactly equal in L1,
// meant making this long routine even longer with collecting Iz_k.
}
*/
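// kernelCollectOhmsGraphs: per-vertex diagnostic pass. It decomposes the z-direction
// Ohm's law for the relative velocity (vez - viz) into the named graph arrays
// (elastic / ionization friction, pressure, electromotive, thermal force, viscosity),
// builds the transport coefficients Tzy, Tzz and the corresponding conductivities,
// and compares the resulting steady-state prediction with the actual step k -> k+1.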
__global__ void kernelCollectOhmsGraphs(
structural * __restrict__ p_info_major,
f64_vec3 * __restrict__ p_MAR_ion_pressure_major,
f64_vec3 * __restrict__ p_MAR_ion_visc_major,
f64_vec3 * __restrict__ p_MAR_elec_pressure_major, // need to distinguish viscous from pressure part.
f64_vec3 * __restrict__ p_MAR_elec_visc_major,
f64_vec3 * __restrict__ p_MAR_elec_ionization_major,
f64_vec3 * __restrict__ p_B_major,
v4 * __restrict__ p_vie_k, // ALL MAJOR
v4 * __restrict__ p_vie_kplus,
f64_vec2 * __restrict__ p_GradTe_major,
nvals * __restrict__ p_n_major_use,
T3 * __restrict__ p_T_major_use,
AAdot * __restrict__ p_AAdot_kplus,
f64 * __restrict__ p_AreaMinor, // EXCEPT THIS ONE
f64 * __restrict__ p_Ohmsgraph_0, // elastic effective frictional coefficient zz
f64 * __restrict__ p_Ohmsgraph_1, // ionization effective frictional coefficient zz
f64 * __restrict__ p_Ohmsgraph_2, // 2 is combined y pressure accel rate
f64 * __restrict__ p_Ohmsgraph_3,// 3 is q/(M+m) Ez -- do we have
f64 * __restrict__ p_Ohmsgraph_4, // 4 is thermal force accel
f64 * __restrict__ p_Ohmsgraph_5, // T_zy
f64 * __restrict__ p_Ohmsgraph_6, // T_zz
f64 * __restrict__ p_Ohmsgraph_7, // T acting on pressure
f64 * __restrict__ p_Ohmsgraph_8, // T acting on electromotive
f64 * __restrict__ p_Ohmsgraph_9, // T acting on thermal force
f64 * __restrict__ p_Ohmsgraph_10, // prediction vez-viz
f64 * __restrict__ p_Ohmsgraph_11, // difference of prediction from vez_k
f64 * __restrict__ p_Ohmsgraph_12, // progress towards eqm: need vez_k+1
f64 * __restrict__ p_Ohmsgraph_13, // viscous acceleration of electrons and ions (z)
f64 * __restrict__ p_Ohmsgraph_14, // Prediction of Jz
f64 * __restrict__ p_Ohmsgraph_15, // sigma zy
f64 * __restrict__ p_Ohmsgraph_16, // sigma zz
f64 * __restrict__ p_Ohmsgraph_17, // sigma zz times electromotive
f64 * __restrict__ p_Ohmsgraph_18 // Difference of prediction from Jz predicted.
)
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64_vec2 gradTe[threadsPerTileMinor];
f64_vec3 omega_ce;
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, AreaMinor;
long const iVertex = threadIdx.x + blockIdx.x * blockDim.x; // INDEX OF VERTEX
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_k[iVertex];
// f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_major_use[iVertex];
AreaMinor = p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL];
f64_vec3 MAR_elec, MAR_ion;
memcpy(&MAR_elec, p_MAR_elec_ionization_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_1[iVertex] = (MAR_elec.z / (n_use.n*AreaMinor*vie_k.vez));
// ionization effective frictional coefficient zz
memcpy(&MAR_ion, p_MAR_ion_pressure_major + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec_pressure_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_2[iVertex] = (m_i*MAR_ion.y + m_e*MAR_elec.y)/((m_i + m_e)*(n_use.n*AreaMinor));
memcpy(&MAR_ion, p_MAR_ion_visc_major + iVertex, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec_visc_major + iVertex, sizeof(f64_vec3));
p_Ohmsgraph_13[iVertex] = (m_i*MAR_ion.y + m_e*MAR_elec.y) / ((m_i + m_e)*(n_use.n*AreaMinor));
// v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
//v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_major_use[iVertex];
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
}
}
gradTe[threadIdx.x] = p_GradTe_major[iVertex];
omega_ce = qovermc*p_B_major[iVertex];
omega_ce.z = BZ_CONSTANT*qovermc;
f64 nu_ei_effective =
nu_eiBar * (1.0 - 0.9*nu_eiBar*
(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))));
p_Ohmsgraph_0[iVertex] = nu_ei_effective + M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n);
AAdot AAzdot_kplus = p_AAdot_kplus[iVertex];
p_Ohmsgraph_3[iVertex] = -(q * (m_i + m_e)/(m_e*m_i))*(Ez_strength*GetEzShape(info.pos.modulus()) - AAzdot_kplus.Azdot / c);
//v0.viz +=
// 1.5*h_use*nu_eiBar*(
// (omega_ce.x*qovermc*BZ_CONSTANT - nu_eHeart * omega_ce.y)*gradTe[threadIdx.x].x +
// (omega_ce.y*qovermc*BZ_CONSTANT + nu_eHeart * omega_ce.x)*gradTe[threadIdx.x].y) /
// (m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
//v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
// denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
// + h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
// h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
// + h_use *moverM*nu_ei_effective;
// v0.viz /= denom;
// implies:
//f64 effect_of_viz0_on_vez0 =
// h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
// + h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
// remember it's ve-vi :
p_Ohmsgraph_4[iVertex] = -1.5*nu_eiBar*((
(omega_ce.x*qovermc*BZ_CONSTANT - nu_eHeart * omega_ce.y)*gradTe[threadIdx.x].x +
(omega_ce.y*qovermc*BZ_CONSTANT + nu_eHeart * omega_ce.x)*gradTe[threadIdx.x].y) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))*(1.0 / m_i + 1.0 / m_e);
// Now it's time to work out T_zy and T_zz:
f64 Tzy, Tzz;
f64 a_ = -0.9*nu_eiBar*nu_eiBar / (nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
f64 b_ = 1.0 - a_*nu_eHeart;
f64 c_ = M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) + nu_eiBar + nu_eHeart*nu_eHeart*a_;
Tzy = ((a_*b_*omega_ce.dot(omega_ce) + b_*c_)*omega_ce.x + (b_*b_ - a_*c_)*omega_ce.y*omega_ce.z) /
((a_*omega_ce.dot(omega_ce) + c_)*(b_*b_*omega_ce.dot(omega_ce) + c_*c_));
Tzz = (c_*c_ + a_*c_*omega_ce.dot(omega_ce) + (b_*b_-a_*c_)*omega_ce.z*omega_ce.z) /
((a_*omega_ce.dot(omega_ce) + c_)*(b_*b_*omega_ce.dot(omega_ce) + c_*c_));
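// Tzy and Tzz act as the (z,y) and (z,z) entries of the inverted friction/magnetisation
// operator: (vez - viz) ~= Tzy * a_y + Tzz * a_z for a steady driving acceleration a,
// which is how they are applied to graphs 2-4 below.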
// Notice: if omega_ce = 0 then Tzz = c_*c_/(c_*c_*c_) = 1/c_ (c_ here is the collision
// coefficient defined above, not the speed of light). In that limit
// a_ = -0.9*nu_eiBar^2/nu_eHeart^3, so
// Tzz = 1/( M_n_over_ne*nu_en_MT + nu_eiBar - 0.9*nu_eiBar^2/nu_eHeart ),
// with nu_eHeart = 1.87*nu_eiBar + nu_en_visc.
// That is close to the expected effective friction rate; remember this is for the e-i relative velocity.
p_Ohmsgraph_5[iVertex] = Tzy;
p_Ohmsgraph_6[iVertex] = Tzz;
p_Ohmsgraph_7[iVertex] = Tzy*p_Ohmsgraph_2[iVertex];
p_Ohmsgraph_8[iVertex] = Tzz*p_Ohmsgraph_3[iVertex];
p_Ohmsgraph_9[iVertex] = Tzz*p_Ohmsgraph_4[iVertex];
p_Ohmsgraph_10[iVertex] = p_Ohmsgraph_7[iVertex] + p_Ohmsgraph_8[iVertex] + p_Ohmsgraph_9[iVertex];
if (iVertex == VERTCHOSEN) printf("\n\nOhmsgraphs info %d : omega %1.8E %1.8E %1.8E abc %1.8E %1.8E %1.8E\n"
"nu_eiBar nu_eHeart nu_en %1.10E %1.10E %1.10E nu 1 and 2 %1.9E %1.9E ; \naccels 2 3 4 %1.9E %1.9E %1.9E\n"
"Tzy Tzz %1.9E %1.9E prediction %1.9E \n",
iVertex, omega_ce.x, omega_ce.y, omega_ce.z,
a_, b_, c_,
nu_eiBar, nu_eHeart, n_use.n_n*s_en_visc*electron_thermal,
p_Ohmsgraph_0[iVertex],p_Ohmsgraph_1[iVertex], p_Ohmsgraph_2[iVertex], p_Ohmsgraph_3[iVertex], p_Ohmsgraph_4[iVertex],
Tzy, Tzz, p_Ohmsgraph_10[iVertex]
);
v4 vie_kplus = p_vie_kplus[iVertex];
p_Ohmsgraph_11[iVertex] = p_Ohmsgraph_10[iVertex] - vie_k.vez + vie_k.viz;
p_Ohmsgraph_12[iVertex] = vie_kplus.vez - vie_kplus.viz - vie_k.vez + vie_k.viz;
p_Ohmsgraph_14[iVertex] = -q*n_use.n*p_Ohmsgraph_10[iVertex];
p_Ohmsgraph_15[iVertex] = eoverm*n_use.n*Tzy;
p_Ohmsgraph_16[iVertex] = eoverm*n_use.n*Tzz;
p_Ohmsgraph_17[iVertex] = -q*n_use.n*Tzz*p_Ohmsgraph_3[iVertex];
p_Ohmsgraph_18[iVertex] = p_Ohmsgraph_14[iVertex] - q*n_use.n*(vie_kplus.viz - vie_kplus.vez);
} else {
// Non-domain triangle or vertex
// ==============================
}
}
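// MeasureAccelz: diagnostic kernel. It reconstructs the relative z-acceleration
// d/dt (vez - viz) over the substep from the individual physical contributions
// (MAR ion/electron terms, external and inductive Ez, v x B, thermal force,
// neutral friction, e-i friction), sums them, and stores the discrepancy against
// the acceleration actually realised between p_vie_initial and p_vie_final.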
__global__ void MeasureAccelz(
structural * __restrict__ p_info,
v4 * __restrict__ p_vie_initial,
v4 * __restrict__ p_vie_final,
f64_vec3 * __restrict__ p_v_nk,
f64_vec3 * __restrict__ p_v_nkplus1,
f64 const h_use, // substep
f64_vec2 * __restrict__ pGradAz,
f64_vec2 * __restrict__ pGradTe,
AAdot * __restrict__ p_AAdot,
AAdot * __restrict__ p_AAdot_k,
f64 * __restrict__ pLapAz,
nvals * __restrict__ p_n_central,
T3 * __restrict__ p_T_central,
f64_vec3 * __restrict__ p_B,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_MAR_neut,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_arelz,
f64 * __restrict__ p_MAR_ion_effect,
f64 * __restrict__ p_MAR_elec_effect,
f64 * __restrict__ p_Ezext_electromotive,
f64 * __restrict__ p_inductive_electromotive,
f64 * __restrict__ p_vxB,
f64 * __restrict__ p_thermal_force_effect,
f64 * __restrict__ p_friction_neutrals,
f64 * __restrict__ p_friction_ei,
f64 * __restrict__ p_sum_of_effects,
f64 * __restrict__ p_difference
) {
long iVertex = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info[iVertex];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_i = p_vie_initial[iVertex];
v4 vie_f = p_vie_final[iVertex];
f64 accel;
p_arelz[iVertex] = (vie_f.vez - vie_f.viz - vie_i.vez + vie_i.viz) / h_use;
f64_vec2 Grad_Az = pGradAz[iVertex];
f64_vec2 gradTe = pGradTe[iVertex];
f64 Azdot = p_AAdot[iVertex].Azdot;
f64 dAzdt_k = p_AAdot_k[iVertex].Azdot;
f64 AreaMinor = p_AreaMinor[iVertex];
nvals n_use = p_n_central[iVertex];
if (iVertex == VERTCHOSEN) printf("iVertex = %d BOC = %d sum = %d \n",
iVertex, BEGINNING_OF_CENTRAL, iVertex + BEGINNING_OF_CENTRAL);
f64_vec3 MAR, MAR_ion, MAR_elec;
memcpy(&MAR_ion, p_MAR_ion + iVertex, sizeof(f64_vec3));
p_MAR_ion_effect[iVertex] = -MAR_ion.z / (n_use.n*AreaMinor); // note minus
memcpy(&MAR_elec, p_MAR_elec + iVertex, sizeof(f64_vec3));
p_MAR_elec_effect[iVertex] = MAR_elec.z / (n_use.n*AreaMinor);
p_Ezext_electromotive[iVertex] = -(eoverm + qoverM) * GetEzShape(info.pos.modulus()) * Ez_strength;
p_inductive_electromotive[iVertex] = (eoverm + qoverM) *Azdot / c;
p_vxB[iVertex] = (qovermc+qoverMc)*Grad_Az.dot(vie_f.vxy);
f64_vec3 omega_ce;
omega_ce.x = p_B[iVertex].x*qovermc;
omega_ce.y = p_B[iVertex].y*qovermc;
omega_ce.z = BZ_CONSTANT*qovermc;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_central[iVertex];
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
s_en_MT *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n);
s_in_MT *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n); // returns factor 1.0 if n+nn > 1.0e14.
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(n_use.n, MINIMUM_NU_EI_DENSITY)*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
f64 nu_ei_effective =
nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + omega_ce.z*omega_ce.z) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce) )) );
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
}
p_thermal_force_effect[iVertex] =
// viz part:
-(1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
)
// vez part:
- 1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)));
f64_vec3 v_nk = p_v_nk[iVertex];
f64_vec3 v_nkplus1 = p_v_nkplus1[iVertex];
// Is this the right sign?
p_friction_neutrals[iVertex] =
M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n)*
(p_v_nkplus1[iVertex].z - vie_f.vez)
- M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*
(p_v_nkplus1[iVertex].z - vie_f.viz);
p_friction_ei[iVertex] = -(1.0 + moverM)*nu_ei_effective*(vie_f.vez-vie_f.viz);
p_sum_of_effects[iVertex] =
p_MAR_ion_effect[iVertex] + p_MAR_elec_effect[iVertex] +
p_Ezext_electromotive[iVertex] + p_inductive_electromotive[iVertex] +
p_vxB[iVertex] + p_thermal_force_effect[iVertex] +
p_friction_neutrals[iVertex] + p_friction_ei[iVertex]
;
// should equal acceleration that obtained. Is it different??
p_difference[iVertex] = p_arelz[iVertex] - p_sum_of_effects[iVertex];
if (TEST_VS_MATRIX2) {
printf("vie_f.vez %1.10E vie_i.vez %1.10E vie_f.viz %1.8E vie_i.viz %1.8E \narelz %1.13E hsub %1.9E \n sum %1.13E diff %1.8E \n\n",
vie_f.vez, vie_i.vez, vie_f.viz, vie_i.viz, p_arelz[iVertex], h_use, p_sum_of_effects[iVertex], p_difference[iVertex]);
printf("effects: %1.8E %1.8E Ez %1.8E %1.8E vxB %1.8E thermalforce %1.8E fric %1.8E %1.8E\n\n######################\n\n",
p_MAR_ion_effect[iVertex], p_MAR_elec_effect[iVertex],
p_Ezext_electromotive[iVertex], p_inductive_electromotive[iVertex],
p_vxB[iVertex], p_thermal_force_effect[iVertex],
p_friction_neutrals[iVertex], p_friction_ei[iVertex]);
//// Now consider an intermediate formula:
//f64 beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
// + h_use*qoverMc*(Grad_Az.dot(ohm.beta_xy_z))
// + h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
// + h_use * moverM*nu_ei_effective) / denom;
//f64 denom = 1.0 + h_use*h_use*q*eoverm*FOUR_PI*n_use.n
// + M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)
// + h_use*nu_ei_effective
// + h_use*qovermc*(Grad_Az.dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
// ;
//f64 vez_test_2 = vie_i.vez
// + h_use*p_MAR_elec_effect[iVertex]
// + h_use*(-(eoverm) * GetEzShape(info.pos.modulus()) * Ez_strength)//p_Ezext_electromotive[iVertex]
// + h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz))
// + h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)) //p_vxB[iVertex]
// + h_use*(-1.5*nu_eiBar*(
// (omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
// (omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
// (m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
// - M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(-v_nkplus1.z)
// - h_use*nu_ei_effective*(-vie_f.viz);
//vez_test_2 /= denom;
//if (iVertex == VERTCHOSEN) {
// printf("MAR elec component %1.12E \n"
// "Ez ext component %1.12E \n",
// "dAz/dt component %1.12E vie_f.viz %1.12E\n",
// "v x B component %1.12E \n",
// "thermal force component %1.12E \n",
// "- M_n_over_ne*h_use*nu_en*(-v_nkplus1.z) %1.12E \n"
// "- h_use*nu_ei_effective*(-vie_f.viz) %1.12E \n"
// "denom %1.12E \n------------------\nresult vez = %1.12E",
// h_use*p_MAR_elec_effect[iVertex],
// h_use*(-(eoverm)* GetEzShape(info.pos.modulus()) * Ez_strength),
// h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz)),
// vie_f.viz,
// h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)), //p_vxB[iVertex]
// h_use*(-1.5*nu_eiBar*(
// (omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
// (omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
// (m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
// ,
// -M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(-v_nkplus1.z),
// -h_use*nu_ei_effective*(-vie_f.viz),
// denom, vez_test_2
// );
//}
// Recreate vie_f from components:
f64 vdifftest_1 = vie_i.vez - vie_i.viz + h_use*(p_MAR_ion_effect[iVertex] + p_MAR_elec_effect[iVertex] +
p_Ezext_electromotive[iVertex] + p_inductive_electromotive[iVertex] +
p_vxB[iVertex] + p_thermal_force_effect[iVertex] +
p_friction_neutrals[iVertex] + p_friction_ei[iVertex]);
// Produce vez 1 : make it simpler.
f64 vez_1 = vie_i.vez + h_use*p_MAR_ion_effect[iVertex]
+h_use*p_MAR_elec_effect[iVertex]
+ h_use*(-(eoverm)* GetEzShape(info.pos.modulus()) * Ez_strength)//p_Ezext_electromotive[iVertex]
+ h_use*qovermc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*vie_f.viz))
+ h_use*((qovermc + qoverMc)*Grad_Az.dot(vie_f.vxy)) //p_vxB[iVertex]
+ h_use*(-1.5*nu_eiBar*(
(omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))))//p_thermal_force_effect[iVertex]
- M_n_over_ne*h_use*(cross_section_times_thermal_en*n_use.n_n)*(vie_f.vez-v_nkplus1.z)
- h_use*nu_ei_effective*(vie_f.vez-vie_f.viz);
// f64 vdiff2 = vez_test_2 - vie_f.viz;
printf("vie_i.vez vie_f.vez diff | vdifftest_1 "//veztest_2 vdiff2
"\n%1.14E %1.14E %1.14E %1.14E vez1 %1.14E\n",
vie_i.vez, vie_f.vez, vie_f.vez-vie_f.viz, vdifftest_1, //vez_test_2, vdiff2,
vez_1);
printf("Azdot_k+1 %1.14E calc'd %1.14E dA/dt_k %1.10E LapAz %1.10E 4pi/c Jz %1.10E n %1.14E vie_f.viz %1.14E vie_f.vez %1.14E\n",
Azdot,
dAzdt_k + h_use*c*c*(pLapAz[iVertex] + FOURPI_OVER_C*q*n_use.n*(vie_f.viz - vie_f.vez)),
dAzdt_k, pLapAz[iVertex], FOURPI_OVER_C*q*n_use.n*(vie_f.viz - vie_f.vez),
n_use.n, vie_f.viz, vie_f.vez);
// Result : difference 2 is closer to the program difference. Diff 1 is quite different.
// Explain diff between diff 2 and diff 1.
// Magic up matrix eqn:
if (TEST_VS_MATRIX2) {
memcpy(&MAR, p_MAR_neut + iVertex, sizeof(f64_vec3));
// 1. Need to work out vn coefficients !!!
f64 nu_ne_MT = cross_section_times_thermal_en*n_use.n;
f64 nu_ni_MT = cross_section_times_thermal_in*n_use.n;
f64 nu_in_MT = cross_section_times_thermal_in*n_use.n_n;
f64 nu_en_MT = cross_section_times_thermal_en*n_use.n_n;
f64 denom = 1.0 + h_use*M_e_over_en*nu_ne_MT +
h_use*M_i_over_in*nu_ni_MT;
f64_vec3 vn0 = (v_nk + h_use*MAR / (AreaMinor*n_use.n_n))
/ denom;
f64 beta_ne = h_use*M_e_over_en*nu_ne_MT / denom;
f64 beta_ni = h_use*M_i_over_in*nu_ni_MT / denom;
printf("v_nk.xy %1.14E %1.14E MAR.xy %1.14E %1.14E Nn %1.14E Area %1.14E denom %1.14E\n", v_nk.x,
v_nk.y, MAR.x, MAR.y, (AreaMinor*n_use.n_n), AreaMinor, denom);
printf("vn0 %1.14E %1.14E %1.14E beta_ni %1.14E beta_ne %1.14E \n",
vn0.x, vn0.y, vn0.z, beta_ni, beta_ne);
// vx, vy :
// from bwd eqn :
// given Lap Az and EzStrength, (Azdot -- do both ways) :
// the Azdot we got given used the vie_f that got calculated, so no we have to go back to Lap Az.
// Do without Ez terms. Put into separate sigma_izz, sigma_ezz
// 2. vx equation?
f64 temp = (h_use / (m_i + m_e))*
(m_n*m_i*nu_in_MT / (m_i + m_n) + m_e*m_n*nu_en_MT / (m_e + m_n));
f64 M_i_over_ie = m_i / (m_i + m_e);
f64 M_e_over_ie = m_e / (m_i + m_e);
f64 M_n_over_in = m_n / (m_i + m_n);
f64 M_n_over_en = m_n / (m_e + m_n);
f64_vec2 vxy0 = vie_i.vxy
+ h_use*M_i_over_ie*MAR_ion.xypart()/(AreaMinor*n_use.n)
+ h_use*M_e_over_ie*MAR_elec.xypart()/(AreaMinor*n_use.n)
+ temp*vn0.xypart(); // added
printf("vxy0 components:\n"
"vie_i.vxy %1.14E %1.14E MAR_ion_contrib %1.14E %1.14E \n"
"MAR_elec_contrib %1.14E %1.14E temp %1.14E vn0contrib %1.14E %1.14E\n\n",
vie_i.vxy.x, vie_i.vxy.y,
h_use*M_i_over_ie*MAR_ion.x / (AreaMinor*n_use.n),
h_use*M_i_over_ie*MAR_ion.y / (AreaMinor*n_use.n),
h_use*M_e_over_ie*MAR_elec.x / (AreaMinor*n_use.n),
h_use*M_e_over_ie*MAR_elec.y / (AreaMinor*n_use.n),
temp,
temp*vn0.x, temp*vn0.y
);
f64 vx_viz = (h_use*q / (c*(m_i + m_e)))*Grad_Az.x;
f64 vx_vez = (-h_use*q / (c*(m_i + m_e)))*Grad_Az.x;
f64 vy_viz = (h_use*q / (c*(m_i + m_e)))*Grad_Az.y;
f64 vy_vez = (-h_use*q / (c*(m_i + m_e)))*Grad_Az.y;
f64 vxy_vxy = -temp*(1.0- beta_ne - beta_ni);
vxy_vxy -= 1.0; // move LHS over to RHS so we've got 0 = .
printf(" ... 1 vx vy viz vez \n");
printf(" vx %1.14E %1.14E %1.14E %1.14E %1.14E \n"
" vy %1.14E %1.14E %1.14E %1.14E %1.14E \n",
vxy0.x, vxy_vxy, 0.0, vx_viz, vx_vez,
vxy0.y, 0.0, vxy_vxy, vy_viz, vy_vez
);
// VERIFY AGAIN THAT THIS IS GIVING SAME COEFFICIENTS.
// Work systematically: reduce vxy equation and sub in.
denom = -vxy_vxy; // move to LHS ..
OhmsCoeffs ohm;
v4 v0;
v0.vxy = vxy0 / denom;
ohm.beta_xy_z.x = vx_viz/denom;
ohm.beta_xy_z.y = vy_viz/denom;
printf("=-----------------\nv0.vxy %1.14E %1.14E beta_xy_z %1.14E %1.14E \n---------------\n",
v0.vxy.x, v0.vxy.y, ohm.beta_xy_z.x, ohm.beta_xy_z.y);
f64 EzExt = Ez_strength*GetEzShape(info.pos.modulus());
// Worry afterwards about what sigma Ez does.
// Do this in stages.
f64 viz0 = vie_i.viz + h_use*MAR_ion.z / (AreaMinor*n_use.n)
+ h_use*qoverM*EzExt
- h_use*qoverMc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex]))
+ h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
+ h_use*M_n_over_in*nu_in_MT*vn0.z;
printf("viz0 components: vie_i.viz %1.14E \n"
"from MAR_ion.z : %1.14E | from EzExt %1.14E \n"
"from Azdot_k+hc^2LapAz %1.14E \n"
"thermalforceterm %1.14E \n"
"vn0.z effect %1.14E \n"
"total = viz0 : %1.14E \n",
vie_i.viz, h_use*MAR_ion.z / (AreaMinor*n_use.n),
h_use*qoverM*EzExt,
-h_use*qoverMc*(dAzdt_k + h_use*c*c*(pLapAz[iVertex])),
h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))),
h_use*M_n_over_in*nu_in_MT*vn0.z, viz0
);
f64 viz_vx = -h_use*qoverMc*Grad_Az.x;
f64 viz_vy = -h_use*qoverMc*Grad_Az.y;
f64 viz_viz = -h_use*qoverM*h_use*4.0*M_PI*q*n_use.n
-h_use*M_n_over_in*nu_in_MT*(1.0-beta_ni)
- h_use*moverM*nu_ei_effective
;
f64 viz_vez = h_use*qoverM*h_use*4.0*M_PI*q*n_use.n
+ h_use*M_n_over_in*nu_in_MT*(beta_ne)
+ h_use*moverM*nu_ei_effective;
// Think about how it will be solved.
// Eqn: vxvx vx + vxvy vy + .. = -vx0.
// And here vxvx should include -1 vx
viz_viz -= 1.0;
printf(" viz %1.14E %1.14E %1.14E %1.14E %1.14E \n",
viz0, viz_vx, viz_vy, viz_viz, viz_vez);
f64 vez0 = vie_i.vez + h_use*MAR_elec.z / (AreaMinor*n_use.n)
- h_use*eoverm*EzExt
+ h_use*qovermc*(dAzdt_k + h_use*c*c*pLapAz[iVertex])
- h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce)))
+ h_use*M_n_over_en*nu_en_MT*vn0.z;
printf("\nvez0 : vez_k %1.14E \n"
"MAReffect %1.14E \n"
"EzExteffect %1.14E \n"
"Azdot_effect %1.14E \n"
"thermalforceeffect %1.14E \n"
"vn0.z_effect %1.14E \n"
"vez0 %1.14E\n",
vie_i.vez,
h_use*MAR_elec.z / (AreaMinor*n_use.n),
-h_use*eoverm*EzExt,
h_use*qovermc*(dAzdt_k + h_use*c*c*pLapAz[iVertex]),
-h_use*1.5*nu_eiBar*
((omega_ce.x*omega_ce.z - nu_eHeart * omega_ce.y)*gradTe.x +
(omega_ce.y*omega_ce.z + nu_eHeart * omega_ce.x)*gradTe.y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega_ce.dot(omega_ce))),
h_use*M_n_over_en*nu_en_MT*vn0.z,
vez0
);
f64 vez_vx = h_use*qovermc*Grad_Az.x;
f64 vez_vy = h_use*qovermc*Grad_Az.y;
f64 vez_viz = h_use*eoverm*h_use*4.0*M_PI*q*n_use.n
+ h_use*M_n_over_en*nu_en_MT*beta_ni
+ h_use*nu_ei_effective;
f64 vez_vez = -h_use*eoverm*h_use*4.0*M_PI*q*n_use.n
- h_use*M_n_over_en*nu_en_MT*(1.0-beta_ne)
- h_use*nu_ei_effective;
printf("\n vez_vx %1.14E vez_vez %1.14E \n"
"vezvez components: hhq4piqn/m %1.14E hnu_en %1.14E hnu_ei %1.14E\n\n",
vez_vx, vez_vez, -h_use*eoverm*h_use*4.0*M_PI*q*n_use.n,
- h_use*M_n_over_en*nu_en_MT*(1.0 - beta_ne),
- h_use*nu_ei_effective
);
// FIGURE IT OUT : WHAT DOES THIS GIVE AND DOES IT SATISFY BWD EQN
// It practically is the bwd eqn
// If the result from our PopOhms is different, should be possible to detect exactly why.
// We do not take Azdot_k+1 as given but this also should be possible to compute
// given the v_k+1 from this.
// This is bound to work.
vez_vez -= 1.0;
printf(" vez %1.14E %1.14E %1.14E %1.14E %1.14E \n",
vez0, vez_vx, vez_vy, vez_viz, vez_vez);
printf(" ----------------------------------------------------\n");
// Work systematically: reduce vxy equation and sub in.
denom = -vxy_vxy; // move to LHS ..
vxy0.x /= denom;
vxy0.y /= denom;
vx_viz /= denom;
vx_vez /= denom;
vy_viz /= denom;
vy_vez /= denom;
// Substitute in:
viz0 += viz_vx*vxy0.x + viz_vy*vxy0.y;
viz_viz += viz_vx*vx_viz + viz_vy*vy_viz;
viz_vez += viz_vx*vx_vez + viz_vy*vy_vez;
vez0 += vez_vx*vxy0.x + vez_vy*vxy0.y;
vez_viz += vez_vx*vx_viz + vez_vy*vy_viz;
vez_vez += vez_vx*vx_vez + vez_vy*vy_vez;
printf(" viz0 vizviz vizvez %1.14E %1.14E %1.14E \n",
viz0, viz_viz, viz_vez);
printf(" vez0 vezviz vezvez %1.14E %1.14E %1.14E \n",
vez0, vez_viz, vez_vez);
printf(" ----------------------------------------------------\n");
printf("REDUCE viz:\n");
viz0 /= viz_viz;
printf("viz0 %1.14E vizvez %1.14E \n", viz0, viz_vez);
printf(" ----------------------------------------------------\n");
};
};
};
}
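// Reading of the kernels below (grounded in how kernelCalculateVelocityAndAzdot consumes their outputs):
// each backward Ohm's-law kernel reduces the coupled implicit momentum equations to an affine map from
// the (still unknown) uniform Ez_strength to the k+1 velocities,
//     viz = v0.viz + sigma_i_zz * Ez,        vez = v0.vez + sigma_e_zz * Ez,
//     vxy = v0.vxy + beta_xy_z * (viz - vez),
//     v_n = vn0 + beta_ni * v_ion + beta_ne * v_elec   (componentwise),
// and accumulates per-block Iz0 and sigma_zz so that Ez_strength can afterwards be chosen to carry the
// prescribed total current.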
__global__ void kernelPopulateBackwardOhmsLaw_noadvect(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
//AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave)
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // index of minor cell
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
if (TEST_VS_MATRIX) {
printf("%d VS_MAT: v_nk.y %1.14E MAR.y %1.14E Nn %1.14E Area %1.14E\n",
iMinor, vn0.y, MAR.y, (AreaMinor*n_use.n_n), AreaMinor);
printf("%d VS_MAT: v_nk.x %1.14E MAR.x %1.14E Nn %1.14E \n",
iMinor, vn0.x, MAR.x, (AreaMinor*n_use.n_n));
};
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n)); // p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y / (AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
vn0.z += h_use * (MAR.z / (AreaMinor*n_use.n_n));
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz_k %1.14E viz0 with MAR %1.14E \n",
iMinor, vie_k.viz, v0.viz);
}
if (TESTACCEL_X) printf("%d vx_k %1.9E with MARi %1.9E n %1.8E N %1.8E\n", iMinor, vie_k.vxy.x, v0.vxy.x,
n_use.n, n_use.n*AreaMinor);
if (TESTACCEL2) printf("%d vy_k %1.9E with MARi %1.9E MAR.y %1.9E\n", iMinor-BEGINNING_OF_CENTRAL, vie_k.vxy.y, v0.vxy.y,
MAR.y);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
if (TESTVEZ) printf("%d vez_k %1.9E MAR.z %1.9E N %1.9E \n",
iMinor, vie_k.vez, MAR.z, (n_use.n*AreaMinor));
if (TESTACCEL_X) printf("%d v0x with MARi+e %1.9E\n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.x);
if (TESTACCEL2) printf("%d v0y with MARi+e %1.9E MAR.y \n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.y, MAR.y);
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (TESTVEZ) printf("\nGPU %d MAR: changexy %1.10E %1.10E vezchange %1.10E Area %1.10E v0.vez %1.9E vie_k.vez %1.9E\n", iMinor,
h_use * (m_e*MAR.x / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * MAR.z / (n_use.n*AreaMinor),
AreaMinor, v0.vez, vie_k.vez);
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*max(MINIMUM_NU_EI_DENSITY,n_use.n)*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
// Confusing, why does this say that? We used visc en in nu_eHeart, explanation?
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH*n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH*n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
};
// ARTIFICIAL CHANGE TO STOP HAVING TO WORRY ABOUT SILLY VALUES IN AREAS THAT DON'T MATTER MUCH :
cross_section_times_thermal_en *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n);
cross_section_times_thermal_in *= ArtificialUpliftFactor_MT(n_use.n, n_use.n_n); // returns factor 1.0 if n+nn > 1.0e14.
if (TESTVEZ) printf("Uplift factor %1.9E n %1.8E %1.8E\n", ArtificialUpliftFactor(n_use.n, n_use.n_n), n_use.n, n_use.n_n);
// DEBUG:
if (0)//iMinor == CHOSEN)
{
printf("%d xs_therm_in en %1.8E %1.8E nn %1.8E n %1.8E s_in en %1.8E %1.8E i-n therm %1.8E Uplift %1.8E\n",
iMinor,
cross_section_times_thermal_in, cross_section_times_thermal_en,
n_use.n_n, n_use.n, s_en_MT, s_in_MT, ionneut_thermal,
ArtificialUpliftFactor_MT(n_use.n, n_use.n_n));
// Did not trigger? What gives?
};
};
denom = 1.0 + h_use*M_e_over_en*(cross_section_times_thermal_en*n_use.n)
+ h_use*M_i_over_in*(cross_section_times_thermal_in*n_use.n);
if (TESTVNX) printf("%d v_n.x before divide %1.10E \n", iMinor, vn0.x);
if (TESTVNY) printf("%d v_n.y before divide %1.10E \n", iMinor, vn0.y);
vn0 /= denom; // It is now the REDUCED value
if (TESTVNX) printf("%d v_n.x after divide %1.10E \n", iMinor, vn0.x);
if (TESTVNY) printf("%d v_n.y after divide %1.10E \n", iMinor, vn0.y);
//if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) {
// printf("vn_k %1.9E %1.9E %1.9E vn0 %1.9E %1.9E %1.9E denom %1.9E\n",
// v_n_src.x, v_n_src.y, v_n_src.z, vn0.x, vn0.y, vn0.z, denom);
//};
ohm.beta_ne = h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
if (TEST_VS_MATRIX) printf("VS_MAT: vn0 %1.14E %1.14E %1.14E beta_ni %1.14E beta_ne %1.14E denom %1.14E\n",
vn0.x, vn0.y, vn0.z, ohm.beta_ni, ohm.beta_ne, denom);
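// In other words the implicit neutral momentum equation has been rearranged (cf. the TEST_VS_MATRIX2
// block above) into  v_n,k+1 = vn0 + beta_ni * v_i,k+1 + beta_ne * v_e,k+1,  with the i-n and e-n
// friction against the not-yet-known charged velocities carried in the betas.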
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
// debug:
if (LapAz != LapAz) printf("----------\n%d LapAz NaN\n---------\n", iMinor);
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
v0.vxy +=
(h_use / ((m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
( vn0.xypart()); // this reflects v_n and the next reflects minus itself
denom = 1.0 + (h_use / (m_i + m_e))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(1.0 - ohm.beta_ne - ohm.beta_ni);
if (TEST_VS_MATRIX) printf("VS_MAT: vxy0 before divide %1.14E %1.14E denom %1.14E\n",
v0.vxy.x, v0.vxy.y, denom);
v0.vxy /= denom;
if (TESTACCEL_X) printf("%d v0x with neut soak %1.9E\n", iMinor, v0.vxy.x);
if (TESTACCEL2) printf("%d v0y with neut soak %1.9E\n", iMinor - BEGINNING_OF_CENTRAL, v0.vxy.y);
ohm.beta_xy_z = (h_use * q / (c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x]; // coeff on viz-vez
if (TEST_VS_MATRIX) printf("VS_MAT: vxy0 %1.14E %1.14E beta_xy_z %1.14E %1.14E \n\n",
v0.vxy.x, v0.vxy.y, ohm.beta_xy_z.x, ohm.beta_xy_z.y);
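// So at this point  vxy,k+1 = v0.vxy + ohm.beta_xy_z * (viz,k+1 - vez,k+1):  the in-plane equation has
// been solved conditional on the unknown z velocities, which enter through the v x B term.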
// ================================================================================================
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
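// (Reading of nu_ei_effective: since nu_eHeart >= 1.87*nu_eiBar, the bracket lies between roughly 0.52
//  and 1, approaching 1 when the in-plane omega components dominate; i.e. it is nu_eiBar reduced by at
//  most about a half. Offered as an interpretation of the formula, not a derivation.)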
AAdot AAzdot_k = p_AAdot_src[iMinor];
v0.viz +=
-h_use*qoverMc*(AAzdot_k.Azdot + h_use * c*c*LapAz)
- h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x]);// v x B
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz0 %1.14E Azdot+ccLapAz term %1.14E vxy.gradAz term %1.14E \n",
iMinor, v0.viz,
-h_use*qoverMc*(AAzdot_k.Azdot + h_use * c*c*LapAz),
-h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x])
);
}
// Still omega_ce . Check formulas.
v0.viz +=
1.5*h_use*nu_eiBar*(
(omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
if (TEST_VS_MATRIX) {
printf("%d VS_MAT viz0 %1.14E thermalforceterm %1.14E vn0.z for friction %1.14E \n",
iMinor, v0.viz,
1.5*h_use*nu_eiBar*(
(omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]))),
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z
);
}
denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
+ h_use *moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (TEST_VS_MATRIX)
printf("Denom %1.14E = 1+components:\n"
"h_use*h_use*4.0*M_PI*qoverM*q*n_use.n %1.14E \n"
"h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) %1.14E \n"
"h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*(1.0-ohm.beta_ni) %1.14E \n"
"h_use *moverM*nu_ei_effective %1.14E \n"
"------------------------------------- new value of viz0 %1.14E \n"
,
denom,
h_use * h_use*4.0*M_PI*qoverM*q*n_use.n,
h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)),
h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*(1.0 - ohm.beta_ni),
h_use *moverM*nu_ei_effective,
v0.viz
);
//if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * moverM*nu_ei_effective) / denom;
if (TEST_VS_MATRIX)
printf("ohm.sigma_i_zz %1.14E = hq/M / denom \n"
"beta_ie_z %1.14E components before divide by denom:\n"
"h_use*h_use*4.0*M_PI*qoverM*q*n_use.n %1.14E \n"
"h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) %1.14E \n"
"h_use*M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n)*ohm.beta_ne %1.14E \n"
"h_use*moverM*nu_ei_effective %1.14E \n"
"-----------------------------------------------\n",
ohm.sigma_i_zz,
beta_ie_z,
h_use*h_use*4.0*M_PI*qoverM*q*n_use.n,
h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)),
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne,
h_use * moverM*nu_ei_effective);
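// At this stage the ion z-equation has been reduced to  viz,k+1 = v0.viz + sigma_i_zz*Ez + beta_ie_z*vez,k+1
// (the beta_ie_z*vez dependence is substituted out once vez has itself been reduced, further below).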
if (TESTOHMS) printf("%d v0.vez %1.12E before Azdot LapAz and JxB\n", iMinor, v0.vez);
// ====================================================================
// vez:
v0.vez +=
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz))
+ h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz ).dot(grad_Az[threadIdx.x]); // v x B
if (TESTVEZ) printf("%d AzdotLapAzcomponent(v0.viz) %1.12E v0.viz %1.12E \n"
"v x B term (v0) %1.12E \n--------------------------\n"
, iMinor,
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz)), v0.viz,
h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x])
);
if (TESTVEZ) printf("%d vh_use *qovermc*(AAzdot_k.Azdot) %1.14E \nhhqc_overm(LapAz) %1.14E LapAz %1.14E \n"
"hh4piqqoverm n viz %1.14E hq/mc v0.vxy.gradAz %1.14E hq/mc beta_xyz viz.gradAz %1.14E \n"
"v0.vxy %1.12E %1.12E grad Az %1.12E %1.12E \n",
iMinor, h_use *qovermc*(AAzdot_k.Azdot),
h_use *qovermc*(h_use * c*c*(LapAz )),
LapAz,
h_use *qovermc*(h_use * c*c*(FOURPI_Q_OVER_C*n_use.n*v0.viz)),
h_use*qovermc*v0.vxy.dot(grad_Az[threadIdx.x]),
h_use*qovermc*(ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x]),
v0.vxy.x, v0.vxy.y, grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y
);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
+ h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x])); // from the instruction above
if (TESTOHMS) printf("%d v0.vez %1.14E before thermal force\n", iMinor, v0.vez);
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
if (TESTVEZ) printf("%d thermal force %1.14E \n", iMinor, -1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// could store this from above and put opposite -- dividing by m_e instead of m_i
// overdue..?
if (TESTVEZ) printf("%d v0.vez %1.12E MARKER1 \n", iMinor, v0.vez);
effect_of_viz0_on_vez0 +=
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective;
// Apparently we thought to save this INSTEAD of putting it into vez0.
// So the question is: have we deliberately excluded the effect from vez0 IN THE CASE that we are setting up a linear relationship?
// NEUE:
v0.vez += (h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective)*v0.viz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n)*vn0.z;
denom = 1.0 + (h_use*h_use*4.0*M_PI*q*eoverm*n_use.n
+ h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ h_use*nu_ei_effective*(1.0 - beta_ie_z);
if (TEST_VS_MATRIX)
printf("\nPOPOHMS denom_e %1.14E components: \nhh4piqqn/m*(1.0-beta_ie_z) %1.14E grad_Az_dot_beta_xy_z %1.14E \n"
"nu_en_without_ni_ie(1-beta_ne) %1.14E nu_en_ni_ie %1.14E\n"
"hnu_ei_eff %1.14E times_minus_beta_ie_z %1.14E\n\n",
denom,
h_use*h_use*4.0*M_PI*q*eoverm*n_use.n*(1.0 - beta_ie_z),
h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*(1.0 - beta_ie_z),
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne),
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(-ohm.beta_ni*beta_ie_z),
h_use*nu_ei_effective,
h_use*nu_ei_effective*(- beta_ie_z)
);
if (TESTVEZ) printf("%d v0.vez %1.12E nu_ei_effective %1.12E v0.viz %1.12E \n"
"beta_ie_z %1.12E nu_en %1.12E denom %1.12E\n", iMinor, v0.vez, nu_ei_effective, v0.viz, beta_ie_z,
M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n), denom);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*4.0*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use *qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
if (TESTVEZ) printf("%d grad_Az %1.9E %1.9E \n", iMinor, grad_Az[threadIdx.x].x, grad_Az[threadIdx.x].y);
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
if (TESTVEZ) printf("%d v0.vez %1.12E after divide\n", iMinor, v0.vez);
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
} else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
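// Folding EzShape into the sigmas here means the caller can apply the single scalar Ez_strength
// uniformly (as kernelCalculateVelocityAndAzdot does) while the spatial profile of Ez is still respected.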
if (TESTVEZ) printf("%d final v0.vez %1.12E sigma %1.12E \n", iMinor, v0.vez, ohm.sigma_e_zz);
// Think maybe we should take most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
// BRING THIS BACK AND CHECK IT ALL OUT:
// if (Iz[threadIdx.x] > 0.0) printf("%d : Iz %1.8E n_use %1.8E v0.viz %1.8E v0.vez %1.8E\n",
// iMinor, Iz[threadIdx.x], n_use.n, v0.viz, v0.vez);
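// Per-thread contributions to the shared-memory reduction below:
//   Iz[tid]       = q * N * (viz0 - vez0)             -- the Ez-independent part of this minor's current,
//   sigma_zz[tid] = q * N * (sigma_i_zz - sigma_e_zz) -- its response to a unit Ez_strength,
// with N = n * AreaMinor.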
}
else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
memset(&(p_v0_dest[iMinor]), 0, sizeof(v4)); // no velocity!
memset(&(p_vn0_dest[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_OhmsCoeffs_dest[iMinor]), 0, sizeof(OhmsCoeffs));
// if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
// {
// p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
// }
// else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
// p_AAdot_intermediate[iMinor] = temp; //
// };
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
// s odd: element [s-1] was left unpaired by the step above; fold it into this surviving slot.
sigma_zz[threadIdx.x] += sigma_zz[s - 1];
Iz[threadIdx.x] += Iz[s - 1];
};
// e.g. s == 81, k == 40: threads 0..39 covered indices 0..79, so [80] is added here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
// Wishing to make the Jz contributions to Azdot on each side of the ins exactly equal in L1
// would have meant making this long routine even longer by collecting Iz_k.
}
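// Hedged sketch (not part of the original routine): the per-block p_Iz0[] and p_sigma_zz[] written above
// are presumably summed and used to choose Ez_strength so that the prescribed total current flows. The
// helper below only illustrates that closing step; the names Iz_prescribed_host and numBlocks are
// hypothetical, and the real driver code lives elsewhere in the program.
/*
f64 SolveEzStrength_sketch(const f64 *p_Iz0_host, const f64 *p_sigma_zz_host,
	long numBlocks, f64 Iz_prescribed_host)
{
	f64 Iz0_total = 0.0, sigma_total = 0.0;
	for (long i = 0; i < numBlocks; i++) {
		Iz0_total += p_Iz0_host[i];         // sum over tiles of q n A (viz0 - vez0)
		sigma_total += p_sigma_zz_host[i];  // sum over tiles of q n A (sigma_i_zz - sigma_e_zz)
	};
	// Iz_prescribed = Iz0_total + sigma_total * Ez   =>   solve for Ez:
	return (Iz_prescribed_host - Iz0_total) / sigma_total;
}
*/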
__global__ void kernelPopulateResiduals(
f64 * __restrict__ pLapAz,
nvals * __restrict__ p_n_minor,
v4 * __restrict__ p_vie,
f64 * __restrict__ p_residual
) {
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 FourPiovercJz = FOURPI_Q_OVER_C*p_n_minor[iMinor].n*(p_vie[iMinor].viz - p_vie[iMinor].vez);
p_residual[iMinor] = -pLapAz[iMinor] - FourPiovercJz;
}
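// The residual stored above is  -(Lap Az + (4 pi / c) Jz)  with Jz = q n (viz - vez): it vanishes when
// Lap Az + (4 pi / c) Jz = 0, i.e. the Az relation the solver is driving towards, so it serves as a
// convergence check.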
__global__ void kernelAccelerate_v_from_advection
(
f64 const h_use,
structural * __restrict__ p_info_minor,
nvals * __restrict__ p_n_k, // multiply by old mass ..
f64 * __restrict__ p_AreaMinor_k,
nvals * __restrict__ p_n_plus, // divide by new mass ..
f64 * __restrict__ p_AreaMinor_plus,
v4 * __restrict__ p_vie_k,
f64_vec3 * __restrict__ p_v_n_k,
f64_vec3 * __restrict__ p_MAR_neut, // these contain the mom flux due to advection.
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
// outputs:
v4 * __restrict__ p_vie_dest,
f64_vec3 * __restrict__ p_v_n_dest)
{
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x; // index of minor cell
structural info = p_info_minor[iMinor];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
{
v4 vie_k = p_vie_k[iMinor];
f64_vec3 v_n_k = p_v_n_k[iMinor];
nvals n_k = p_n_k[iMinor];
f64 AreaMinor_k = p_AreaMinor_k[iMinor];
nvals n_dest = p_n_plus[iMinor];
f64 AreaMinor_plus = p_AreaMinor_plus[iMinor];
f64 Nk = n_k.n*AreaMinor_k;
f64 Nnk = n_k.n_n*AreaMinor_k;
f64 Nplus = n_dest.n*AreaMinor_plus;
f64 Nnplus = n_dest.n_n*AreaMinor_plus;
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
f64_vec3 v_n;
v_n = (v_n_k*Nnk + h_use * MAR) / Nnplus;
// . We will really need to review this formula and check it is correct -- did we already account for the change in n?
// . Need to check if we would be double-counting to include * oldAreaMinor / newAreaMinor --- I think we counted it.
// The problem is that n's change is the change in N inferred in minor
// so we are preferring to assume that we use the change in N that
// would follow from looking at v on the boundaries of the minor.
// That is what we already rounded up in MAR.
if (TESTVNY2) printf("\n\n%d : v_n_k.y %1.10E v_n.y %1.12E h %1.8E MAR.y %1.10E N %1.10E \n",
iMinor, v_n_k.y, v_n.y, h_use, MAR.y, Nnplus);
if (TESTVNX) printf("\n\n%d : v_n_k.x %1.10E v_n.x %1.12E h %1.8E MAR.x %1.10E N %1.10E \n",
iMinor, v_n_k.x, v_n.x, h_use, MAR.x, Nnplus);
v4 vie;
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
//vie.vxy = (vie_k.vxy * n_k.n + h_use * (m_i*MAR.xypart() / ((m_i + m_e)*AreaMinor))) / n_dest.n;
//vie.viz = (vie_k.viz * n_k.n + h_use * MAR.z / (AreaMinor)) / n_dest.n;
// We would PREFER the N_k / N_k+1 version, however
vie.vxy = (vie_k.vxy*Nk + h_use * m_i*MAR.xypart() / (m_i + m_e))/Nplus;
vie.viz = (vie_k.viz*Nk + h_use * MAR.z)/Nplus;
if (TEST_ACCEL_Y) printf("iMinor %d vie_k.vxy.y %1.8E Nk %1.9E Nplus %1.9E nk nplus %1.9E %1.9E \n"
"AreaMinor k plus %1.9E %1.9E intermediate vxy %1.9E MAR_ion %1.9E h_use %1.10E \n",
iMinor, vie_k.vxy.y, Nk, Nplus, n_k.n, n_dest.n, AreaMinor_k, AreaMinor_plus, vie.vxy.y, MAR.y, h_use);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
vie.vxy += h_use * (m_e*MAR.xypart() / ((m_i + m_e)*Nplus));
vie.vez = (vie_k.vez*Nk + h_use * MAR.z) / Nplus;
// 22/11/20 FIX FOR NOW: If wind is blowing outwards, do not increase velocity in CROSSING_INS just due to density decreasing.
// Density loss is loss of momentum at same rate so leave velocity unchanged.
// (We have prevented momentum traffic to vertex minors!)
if ((info.flag == CROSSING_INS) && (vie.vxy.dot(info.pos) > 0.0)) vie = vie_k;
if ((info.flag == CROSSING_INS) && (v_n.dot(info.pos) > 0.0)) v_n = v_n_k;
// Bit messed up, but we've got to try something.
if (TEST_ACCEL_Y) printf("MAR_e %1.9E vxy.y %1.9E \n", MAR.y, vie.vxy.y);
if (TEST_ACCEL_EZ) printf("\n%d vie.vez %1.10E vie_k.vez %1.10E Nk %1.9E Nplus %1.9E oldNvez %1.8E \n"
"Nratio %1.8E h*MAR.z %1.8E Areaminor k %1.9E plus %1.9E ; \n",
iMinor, vie.vez, vie_k.vez, Nk, Nplus, Nk*vie_k.vez, Nplus / Nk, h_use*MAR.z,
AreaMinor_k, AreaMinor_plus);
memcpy(&(p_vie_dest[iMinor]), &vie, sizeof(v4));
p_v_n_dest[iMinor] = v_n;
} else {
if (info.flag == OUTERMOST) {
memcpy(&(p_vie_dest[iMinor]), &(p_vie_k[iMinor]), sizeof(v4));
memcpy(&(p_v_n_dest[iMinor]), &(p_v_n_k[iMinor]), sizeof(f64_vec3));
} else {
memset(&(p_vie_dest[iMinor]), 0, sizeof(v4));
memset(&(p_v_n_dest[iMinor]), 0, sizeof(f64_vec3));
};
}
}
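// Summary of the update above: momentum is conserved through the advection step by working with N v
// rather than v, i.e.  v_k+1 = (N_k v_k + h * MomAddRate) / N_k+1  for each species, with the
// CROSSING_INS exception noted in the 22/11/20 comment.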
__global__ void kernelPopulateBackwardOhmsLaw(
f64 h_use,
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_LapAz,
f64_vec2 * __restrict__ p_GradAz,
f64_vec2 * __restrict__ p_GradTe,
nvals * __restrict__ p_n_minor_use,
T3 * __restrict__ p_T_minor_use,
v4 * __restrict__ p_vie_src,
f64_vec3 * __restrict__ p_v_n_src,
AAdot * __restrict__ p_AAdot_src,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ ROCAzdotduetoAdvection,
// Now going to need to go through and see this set 0 or sensible every time.
f64_vec3 * __restrict__ p_vn0_dest,
v4 * __restrict__ p_v0_dest,
OhmsCoeffs * __restrict__ p_OhmsCoeffs_dest,
//AAdot * __restrict__ p_AAdot_intermediate,
f64 * __restrict__ p_Iz0,
f64 * __restrict__ p_sigma_zz,
f64 * __restrict__ p_denom_i,
f64 * __restrict__ p_denom_e,
f64 * __restrict__ p_effect_of_viz0_on_vez0,
f64 * __restrict__ p_beta_ie_z,
bool const bSwitchSave)
{
// Don't forget we can use 16KB shared memory to save a bit of overspill:
// (16*1024)/(512*8) = 4 doubles only for 512 threads. 128K total register space per SM we think.
__shared__ f64 Iz[threadsPerTileMinor], sigma_zz[threadsPerTileMinor];
// __shared__ f64 Iz_k[threadsPerTileMinor];
__shared__ f64_vec2 omega[threadsPerTileMinor], grad_Az[threadsPerTileMinor],
gradTe[threadsPerTileMinor];
// Putting 8 reduces to 256 simultaneous threads. Experiment with 4 in shared.
// f64 viz0_coeff_on_Lap_Az, vez0_coeff_on_Lap_Az; // THESE APPLY TO FEINT VERSION. ASSUME NOT FEINT FIRST.
v4 v0;
f64 denom, ROCAzdot_antiadvect, AreaMinor;
f64_vec3 vn0;
long const iMinor = threadIdx.x + blockIdx.x * blockDim.x;
structural info = p_info_minor[iMinor];
// Can see no reason not to put OUTERMOST here. No point creating a big gradient of vz to it.
if ((info.flag == DOMAIN_VERTEX) || (info.flag == DOMAIN_TRIANGLE)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 vie_k = p_vie_src[iMinor];
f64_vec3 v_n_src = p_v_n_src[iMinor];
nvals n_use = p_n_minor_use[iMinor];
AreaMinor = p_AreaMinor[iMinor];
// Are we better off with operator = or with memcpy?
vn0 = v_n_src;
// if ((TESTTRI)) printf("GPU %d vie_k %1.14E %1.14E\n", iMinor, vie_k.vxy.x, vie_k.vxy.y);
{
f64_vec3 MAR;
memcpy(&MAR, p_MAR_neut + iMinor, sizeof(f64_vec3));
// CHECK IT IS INTENDED TO AFFECT Nv
// REVERTED THE EDIT TO USE 1/n -- THIS WILL NOT GIVE CORRECT M.A.R. EFFECT ON INTEGRAL nv
// We need conservation laws around shock fronts.
vn0.x += h_use * (MAR.x / (AreaMinor*n_use.n_n));
// p_one_over_n[iMinor].n_n/ (AreaMinor));
vn0.y += h_use * (MAR.y / (AreaMinor*n_use.n_n));// MomAddRate is addition rate for Nv. Divide by N.
memcpy(&MAR, p_MAR_ion + iMinor, sizeof(f64_vec3));
v0.vxy = vie_k.vxy + h_use * (m_i*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.viz = vie_k.viz + h_use * MAR.z / (n_use.n*AreaMinor);
memcpy(&MAR, p_MAR_elec + iMinor, sizeof(f64_vec3));
v0.vxy += h_use * (m_e*MAR.xypart() / (n_use.n*(m_i + m_e)*AreaMinor));
v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
if (v0.vez != v0.vez) printf("NANVEZ %d v_k %1.9E MAR.z %1.9E \n", iMinor, vie_k.vez, MAR.z);
if (((TESTTRI))) printf("\nGPU %d a:MAR_e %1.10E %1.10E z %1.10E MAR.y %1.10E Area %1.10E\n", iMinor,
h_use * (m_e*MAR.x / (n_use.n*(m_i + m_e)*AreaMinor)),
h_use * (m_e*MAR.y / (n_use.n*(m_i + m_e)*AreaMinor)),
MAR.z,
MAR.y,
AreaMinor);
}
OhmsCoeffs ohm;
f64 beta_ie_z, LapAz;
f64 cross_section_times_thermal_en, cross_section_times_thermal_in,
nu_eiBar, nu_eHeart;
T3 T = p_T_minor_use[iMinor];
{
// Dimensioning inside a brace allows the following vars to go out of scope at the end of the brace.
f64 sqrt_Te, ionneut_thermal, electron_thermal,
lnLambda, s_in_MT, s_en_MT, s_en_visc;
sqrt_Te = sqrt(T.Te);
ionneut_thermal = sqrt(T.Ti / m_ion + T.Tn / m_n); // hopefully not sqrt(0)
electron_thermal = sqrt_Te * over_sqrt_m_e;
lnLambda = Get_lnLambda_d(n_use.n, T.Te);
{
f64 s_in_visc_dummy;
Estimate_Ion_Neutral_Cross_sections_d(T.Ti*one_over_kB, &s_in_MT, &s_in_visc_dummy);
}
Estimate_Ion_Neutral_Cross_sections_d(T.Te*one_over_kB, &s_en_MT, &s_en_visc);
//nu_ne_MT = s_en_MT * electron_thermal * n_use.n; // have to multiply by n_e for nu_ne_MT
//nu_ni_MT = s_in_MT * ionneut_thermal * n_use.n;
//nu_in_MT = s_in_MT * ionneut_thermal * n_use.n_n;
//nu_en_MT = s_en_MT * electron_thermal * n_use.n_n;
cross_section_times_thermal_en = s_en_MT * electron_thermal;
cross_section_times_thermal_in = s_in_MT * ionneut_thermal;
nu_eiBar = nu_eiBarconst * kB_to_3halves*n_use.n*lnLambda / (T.Te*sqrt_Te);
nu_eHeart = 1.87*nu_eiBar + n_use.n_n*s_en_visc*electron_thermal;
if (nu_eiBar != nu_eiBar) printf("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n"
"iMinor %d n_use.n %1.9E lnLambda %1.9E Te %1.9E sqrt %1.9E \n",
iMinor, n_use.n, lnLambda, T.Te, sqrt_Te);
// ARTIFICIAL CHANGE TO STOP IONS SMEARING AWAY OFF OF NEUTRAL BACKGROUND:
if (n_use.n_n > ARTIFICIAL_RELATIVE_THRESH *n_use.n) {
cross_section_times_thermal_en *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
cross_section_times_thermal_in *= n_use.n_n / (ARTIFICIAL_RELATIVE_THRESH *n_use.n);
// So at 1e18 vs 1e8 it's 10 times stronger
// At 1e18 vs 1e6 it's 1000 times stronger
// nu starts at about 1e11 at the place it failed at 35ns. So 10000 times stronger gives us 1e15.
};
}
denom = 1.0 + h_use * M_e_over_en* (cross_section_times_thermal_en*n_use.n)
+ h_use*M_i_over_in* (cross_section_times_thermal_in*n_use.n);
vn0 /= denom; // It is now the REDUCED value
ohm.beta_ne = h_use*(M_e_over_en)*(cross_section_times_thermal_en*n_use.n) / denom;
ohm.beta_ni = h_use*(M_i_over_in)*(cross_section_times_thermal_in*n_use.n) / denom;
// Now we do vexy:
grad_Az[threadIdx.x] = p_GradAz[iMinor];
gradTe[threadIdx.x] = p_GradTe[iMinor];
LapAz = p_LapAz[iMinor];
// SOON GET RID OF THIS CRAP:
ROCAzdot_antiadvect = ROCAzdotduetoAdvection[iMinor];
if (((TESTTRI))) printf("GPU %d: LapAz %1.14E\n", CHOSEN, LapAz);
v0.vxy +=
(h_use / ((m_i + m_e)))*(m_n*M_i_over_in*(cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*
(vn0.xypart());
denom = 1.0 + (h_use / (m_i + m_e))*(
m_n* M_i_over_in* (cross_section_times_thermal_in*n_use.n_n)
+ m_n * M_e_over_en*(cross_section_times_thermal_en*n_use.n_n))*(1.0 - ohm.beta_ne - ohm.beta_ni);
v0.vxy /= denom;
ohm.beta_xy_z = (h_use * q / (c*(m_i + m_e)*denom)) * grad_Az[threadIdx.x]; // coeff on viz-vez
omega[threadIdx.x] = qovermc*p_B[iMinor].xypart();
f64 nu_ei_effective = nu_eiBar * (1.0 - 0.9*nu_eiBar*(nu_eHeart*nu_eHeart + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT) /
(nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].x*omega[threadIdx.x].x + omega[threadIdx.x].y*omega[threadIdx.x].y + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT)));
// if (nu_ei_effective != nu_ei_effective) printf("nu_ei NaN: omega %1.8E %1.8E nu_eHeart %1.8E nu_eiBar %1.8E\n",
// omega[threadIdx.x].x, omega[threadIdx.x].y, nu_eHeart, nu_eiBar);
AAdot AAzdot_k = p_AAdot_src[iMinor];
v0.viz +=
-h_use*qoverMc*(AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect + h_use * c*c*LapAz)
- h_use*qoverMc*(v0.vxy).dot(grad_Az[threadIdx.x]);
// Still omega_ce . Check formulas.
v0.viz +=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_i*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x])));
v0.viz += h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *vn0.z;
denom = 1.0 + h_use * h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use * qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)) +
h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *(1.0 - ohm.beta_ni)
+ h_use *moverM*nu_ei_effective;
if (bSwitchSave) p_denom_i[iMinor] = denom;
// viz0_coeff_on_Lap_Az = -0.5*h_use*qoverMc*h_use*c*c / denom;
v0.viz /= denom;
if (((TESTTRI))) printf("viz0 divided %1.14E denom %1.14E \n", v0.viz, denom);
ohm.sigma_i_zz = h_use * qoverM / denom;
beta_ie_z = (h_use*h_use*4.0*M_PI*qoverM*q*n_use.n
+ h_use*qoverMc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))
+ h_use * M_n_over_ni*(cross_section_times_thermal_in*n_use.n_n) *ohm.beta_ne
+ h_use * moverM*nu_ei_effective) / denom;
v0.vez +=
h_use *qovermc*(AAzdot_k.Azdot
+ h_use * ROCAzdot_antiadvect
+ h_use * c*c*(LapAz + FOURPI_Q_OVER_C*n_use.n*v0.viz))
+ h_use*qovermc*(v0.vxy + ohm.beta_xy_z*v0.viz).dot(grad_Az[threadIdx.x]);
// implies:
f64 effect_of_viz0_on_vez0 =
h_use * qovermc*h_use * c*c* FOURPI_Q_OVER_C*n_use.n
+ h_use*qovermc*(ohm.beta_xy_z.dot(grad_Az[threadIdx.x]));
v0.vez -=
1.5*h_use*nu_eiBar*((omega[threadIdx.x].x*qovermc*BZ_CONSTANT - nu_eHeart * omega[threadIdx.x].y)*gradTe[threadIdx.x].x +
(omega[threadIdx.x].y*qovermc*BZ_CONSTANT + nu_eHeart * omega[threadIdx.x].x)*gradTe[threadIdx.x].y) /
(m_e*nu_eHeart*(nu_eHeart*nu_eHeart + omega[threadIdx.x].dot(omega[threadIdx.x]) + qovermc*BZ_CONSTANT*qovermc*BZ_CONSTANT));
// could store this from above and put opposite -- dividing by m_e instead of m_i
// overdue..?
v0.vez += h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(vn0.z + ohm.beta_ni * v0.viz)
+ h_use*nu_ei_effective*v0.viz;
// implies:
effect_of_viz0_on_vez0 +=
h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni + h_use*nu_ei_effective;
denom = 1.0 + (h_use*h_use*4.0*M_PI*q*eoverm*n_use.n
+ h_use*qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z)))*(1.0 - beta_ie_z)
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *(1.0 - ohm.beta_ne - ohm.beta_ni * beta_ie_z)
+ h_use*nu_ei_effective*(1.0 - beta_ie_z);
// vez0_coeff_on_Lap_Az = h_use * h_use*0.5*qovermc* c*c / denom;
ohm.sigma_e_zz =
(-h_use * eoverm
+ h_use * h_use*4.0*M_PI*q*eoverm*n_use.n*ohm.sigma_i_zz
+ h_use *qovermc*(grad_Az[threadIdx.x].dot(ohm.beta_xy_z))*ohm.sigma_i_zz
+ h_use*M_n_over_ne*(cross_section_times_thermal_en*n_use.n_n) *ohm.beta_ni*ohm.sigma_i_zz
+ h_use*nu_ei_effective*ohm.sigma_i_zz)
/ denom;
v0.vez /= denom;
effect_of_viz0_on_vez0 /= denom; // of course
if (bSwitchSave) {
p_denom_e[iMinor] = denom;
p_effect_of_viz0_on_vez0[iMinor] = effect_of_viz0_on_vez0;
p_beta_ie_z[iMinor] = beta_ie_z; // see that doing it this way was not best.
}
else {
// #########################################################################################################
// DEBUG: pass graphing parameters through these.
// #########################################################################################################
p_denom_i[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective;
p_denom_e[iMinor] = M_n_over_ne*cross_section_times_thermal_en*n_use.n_n /
(M_n_over_ne*cross_section_times_thermal_en*n_use.n_n + nu_ei_effective);
};
// Now update viz(Ez):
v0.viz += beta_ie_z * v0.vez;
ohm.sigma_i_zz += beta_ie_z * ohm.sigma_e_zz;
// sigma_e_zz and sigma_i_zz are change in vz for a change in Ez
{
f64 EzShape = GetEzShape(info.pos.modulus());
ohm.sigma_i_zz *= EzShape;
ohm.sigma_e_zz *= EzShape;
}
// Think maybe we should take most of this routine out of the subcycle.
// Rate of acceleration over timestep due to resistance, pressure, thermal force etc could be stored.
// Saving off some eqn data isn't so bad when we probably overflow registers and L1 here anyway.
// All we need is to know that we update sigma
// We can do addition of
// ==============================================================================================
p_v0_dest[iMinor] = v0;
p_OhmsCoeffs_dest[iMinor] = ohm;
p_vn0_dest[iMinor] = vn0;
Iz[threadIdx.x] = q*AreaMinor*n_use.n*(v0.viz - v0.vez);
sigma_zz[threadIdx.x] = q*AreaMinor*n_use.n*(ohm.sigma_i_zz - ohm.sigma_e_zz);
}
else {
// Non-domain triangle or vertex
// ==============================
// Need to decide whether crossing_ins triangle will experience same accel routine as the rest?
// I think yes so go and add it above??
// We said v_r = 0 necessarily to avoid sending mass into ins.
// So how is that achieved there? What about energy loss?
// Need to determine a good way. Given what v_r in tri represents. We construe it to be AT the ins edge so
// ...
Iz[threadIdx.x] = 0.0;
sigma_zz[threadIdx.x] = 0.0;
// if ((iMinor < BEGINNING_OF_CENTRAL) && ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)))
// {
// p_AAdot_intermediate[iMinor].Azdot = 0.0;
// Set Az equal to neighbour in every case, after Accelerate routine.
// }
// else {
// Let's make it go right through the middle of a triangle row for simplicity.
//f64 Jz = 0.0;
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
//{
// // Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// // ASSUME we are fed Iz_prescribed.
// //Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
// AreaMinor = p_AreaMinor[iMinor];
// Jz = negative_Iz_per_triangle / AreaMinor; // Iz would come from multiplying back by area and adding.
//};
// AAdot temp = p_AAdot_src[iMinor];
// temp.Azdot += h_use * c*(c*p_LapAz[iMinor]);// +4.0*M_PI*Jz);
// + h_use * ROCAzdot_antiadvect // == 0
// p_AAdot_intermediate[iMinor] = temp; //
// };
};
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sigma_zz[threadIdx.x] += sigma_zz[threadIdx.x + k];
Iz[threadIdx.x] += Iz[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
// s odd: element [s-1] was left unpaired by the step above; fold it into this surviving slot.
sigma_zz[threadIdx.x] += sigma_zz[s - 1];
Iz[threadIdx.x] += Iz[s - 1];
};
// e.g. s == 81, k == 40: threads 0..39 covered indices 0..79, so [80] is added here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sigma_zz[blockIdx.x] = sigma_zz[0];
p_Iz0[blockIdx.x] = Iz[0];
}
// Wishing to make the Jz contributions to Azdot on each side of the ins exactly equal in L1
// would have meant making this long routine even longer by collecting Iz_k.
}
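// (kernelPopulateBackwardOhmsLaw above is the advective counterpart of kernelPopulateBackwardOhmsLaw_noadvect:
//  it adds the h_use*ROCAzdot_antiadvect term inside the Azdot used for viz0/vez0, and, as written, it omits
//  the vn0.z MAR increment, the MINIMUM_NU_EI_DENSITY floor and the ArtificialUpliftFactor_MT scaling that
//  appear in the noadvect version.)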
/*__global__ void Estimate_Effect_on_Integral_Azdot_from_Jz_and_LapAz(
f64 hstep,
structural * __restrict__ p_info,
nvals * __restrict__ p_nvals_k,
nvals * __restrict__ p_nvals_use,
v4 * __restrict__ p_vie_k,
v4 * __restrict__ p_vie_kplus1,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz,
AAdot * __restrict__ p_Azdot,
f64 * __restrict__ p_tile1, // +ve Jz
f64 * __restrict__ p_tile2, // -ve Jz
f64 * __restrict__ p_tile3, // LapAz
f64 * __restrict__ p_tile4, // integrate Azdot diff
f64 * __restrict__ p_tile5,
f64 * __restrict__ p_tile6
)
{
__shared__ f64 sum1[threadsPerTileMinor];
__shared__ f64 sum2[threadsPerTileMinor];
__shared__ f64 sum3[threadsPerTileMinor];
__shared__ f64 sum4[threadsPerTileMinor];
__shared__ f64 sum5[threadsPerTileMinor];
__shared__ f64 sum6[threadsPerTileMinor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
structural info = p_info[iMinor];
nvals n_k = p_nvals_k[iMinor];
nvals n_use = p_nvals_use[iMinor];
v4 v_k = p_vie_k[iMinor];
v4 v_kplus1 = p_vie_kplus1[iMinor];
f64 AreaMinor = p_AreaMinor[iMinor];
f64 LapAz = p_LapAz[iMinor];
sum1[threadIdx.x] = 0.0;
sum2[threadIdx.x] = 0.0;
sum3[threadIdx.x] = 0.0;
sum4[threadIdx.x] = 0.0;
sum5[threadIdx.x] = 0.0;
sum6[threadIdx.x] = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
sum1[threadIdx.x] =
hstep*c*c*0.5*FOURPI_OVER_C * q*n_k.n*(v_k.viz - v_k.vez)*AreaMinor
+ hstep*c*0.5*FOUR_PI*q*n_use.n*(v_kplus1.viz - v_kplus1.vez)*AreaMinor;
// Was n used consistently?
} else {
//if ((iMinor >= numStartZCurrentTriangles) && (iMinor < numEndZCurrentTriangles))
if (info.flag == REVERSE_JZ_TRI)
sum2[threadIdx.x] = hstep*c*4.0*M_PI*negative_Iz_per_triangle;
}
// make sure we copy from the code:
sum3[threadIdx.x] = hstep*c*c*LapAz*AreaMinor;
sum4[threadIdx.x] = fabs(hstep*c*c*LapAz*AreaMinor);
sum5[threadIdx.x] = p_Azdot[iMinor].Azdot * AreaMinor;
sum6[threadIdx.x] = fabs(p_Azdot[iMinor].Azdot * AreaMinor);
// -----------------------------------------------------------------------------
__syncthreads();
// .Collect Jz = Jz0 + sigma_zz Ez_strength on each minor cell
// .Estimate Ez
// sigma_zz should include EzShape for this minor cell
// The mission if iPass == 0 was passed is to save off Iz0, SigmaIzz.
// First pass set Ez_strength = 0.0.
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum1[threadIdx.x] += sum1[threadIdx.x + k];
sum2[threadIdx.x] += sum2[threadIdx.x + k];
sum3[threadIdx.x] += sum3[threadIdx.x + k];
sum4[threadIdx.x] += sum4[threadIdx.x + k];
sum5[threadIdx.x] += sum5[threadIdx.x + k];
sum6[threadIdx.x] += sum6[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum1[threadIdx.x] += sum1[threadIdx.x + s - 1];
sum2[threadIdx.x] += sum2[threadIdx.x + s - 1];
sum3[threadIdx.x] += sum3[threadIdx.x + s - 1];
sum4[threadIdx.x] += sum4[threadIdx.x + s - 1];
sum5[threadIdx.x] += sum5[threadIdx.x + s - 1];
sum6[threadIdx.x] += sum6[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_tile1[blockIdx.x] = sum1[0];
p_tile2[blockIdx.x] = sum2[0];
p_tile3[blockIdx.x] = sum3[0];
p_tile4[blockIdx.x] = sum4[0];
p_tile5[blockIdx.x] = sum5[0];
p_tile6[blockIdx.x] = sum6[0];
}
}*/
__global__ void kernelCalculateVelocityAndAzdot(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
f64 * __restrict__ p_ROCAzdotantiadvect,
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
temp.Azdot += h_use*(c*c*p_LapAz[iMinor] + p_ROCAzdotantiadvect[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (info.flag == CROSSING_INS) {
f64_vec2 rhat = info.pos / info.pos.modulus();
v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
v.vxy -= v.vxy.dot(rhat)*rhat;
}
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
if (info.flag == OUTERMOST) {
temp.Azdot = 0.0;
temp.Az = 0.0;
}
else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for C_INS too
}
if ((TESTTRI2)) printf(
"CVAA iMinor %d v0.vez %1.9E sigma_e_zz %1.9E Ez %1.9E v.vez %1.9E\n",
iMinor, v0.vez, ohm.sigma_e_zz, Ez_strength, v.vez);
}
else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}
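// Note on the Azdot update above: combined with the LapAz term added on entry, this implements the backward
// step  Azdot_k+1 = Azdot_k + h*(ROCAzdot_antiadvect + c^2 * Lap Az + 4 pi c q n (viz_k+1 - vez_k+1)),
// using the just-computed k+1 velocities, so Az and the Ohm's-law solve stay consistent.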
__global__ void kernelCalculateVelocityAndAzdot_noadvect(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out )
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
// debug:
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
if (TESTACCEL) printf("iVertex %d v0.xy %1.9E %1.9E\n",
iVertex, v0.vxy.x, v0.vxy.y);
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
if (TESTACCEL) printf("iVertex %d ohm.beta_xz yz %1.9E %1.9E viz %1.9E vez %1.9E effect xy %1.9E %1.9E\n",
iVertex,
ohm.beta_xy_z.x, ohm.beta_xy_z.y,
v.viz,
v.vez,
ohm.beta_xy_z.x * (v.viz - v.vez),
ohm.beta_xy_z.y * (v.viz - v.vez));
//if (TESTACCEL) printf("iVertex %d ohm.beta_yz %1.9E viz %1.9E vez %1.9E effect %1.9E\n",
// iVertex,
// ohm.beta_xy_z.y,
// v.viz,
// v.vez,
// ohm.beta_xy_z.y * (v.viz - v.vez));
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (TESTVNY) printf("%d v_n.y %1.9E since ohm %1.9E v.vxy.y %1.9E \n", iMinor, v_n.y,
(ohm.beta_ne + ohm.beta_ni), v.vxy.y);
if (TESTVNX) printf("%d v_n.y %1.9E since ohm %1.9E v.vxy.y %1.9E \n", iMinor, v_n.x,
(ohm.beta_ne + ohm.beta_ni), v.vxy.x);
// if (info.flag == CROSSING_INS) {
// f64_vec2 rhat = info.pos / info.pos.modulus();
// v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
// v.vxy -= v.vxy.dot(rhat)*rhat;
//
// if (TESTACCEL) printf("v.vxy after negate r component : %1.9E %1.9E\n", v.vxy.x, v.vxy.y);
// }
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
// if (info.flag == OUTERMOST) {
// temp.Azdot = 0.0;
// temp.Az = 0.0; // really!
// } else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
// }
if (TESTACCEL) printf("CVAA:iVertex %d v_out.xy %1.9E %1.9E\n", iVertex, v.vxy.x, v.vxy.y);
if (TESTVEZ) printf("%d CVAA vez %1.11E v0 %1.11E Ez_strength %1.14E sigma %1.14E \n"
"Azdot %1.9E components: k %1.9E h_use*(c*c*p_LapAz) %1.9E hc4piJ %1.9E\n"
"n viz vez %1.14E %1.14E %1.14E\n"
, iMinor, v.vez, v0.vez,
Ez_strength, ohm.sigma_e_zz, temp.Azdot, p_AAzdot_src[iMinor].Azdot, h_use*(c*c*p_LapAz[iMinor]),
h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez),
n_use.n, v.viz, v.vez
);
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}
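// For reference, the update applied by kernelCalculateVelocityAndAzdot_noadvect, restated in
// equation form (this is only a restatement of the code above, not an independent derivation):
//
//   v_ez = v0_ez + sigma_ezz * Ez_strength
//   v_iz = v0_iz + sigma_izz * Ez_strength
//   v_xy = v0_xy + beta_xy_z * (v_iz - v_ez)
//   v_n.xy += (beta_ne + beta_ni) * v_xy ,   v_n.z += beta_ne * v_ez + beta_ni * v_iz
//
//   Azdot_k+1 = Azdot_k + h c^2 Lap Az + 4 pi h c q n (v_iz - v_ez)
//
// with Jz = negative_Iz_per_triangle / AreaMinor supplying the 4 pi h c Jz term in
// REVERSE_JZ_TRI cells instead.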
/*
__global__ void kernelCalculateVelocityAndAzdot_noadvect__debugintegrate(
f64 h_use,
structural * p_info_minor,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out,
f64 * __restrict__ p_integ_Jz1,
f64 * __restrict__ p_integ_Jz2,
f64 * __restrict__ p_integ_LapAz
)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ f64 sum_Jzdomain[threadsPerTileMinor];
__shared__ f64 sum_Jzreverse[threadsPerTileMinor];
__shared__ f64 sum_LapAz[threadsPerTileMinor];
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
sum_Jzdomain[threadIdx.x] = 0.0;
sum_Jzreverse[threadIdx.x] = 0.0;
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
sum_LapAz[threadIdx.x] = c*c*p_LapAz[iMinor] * p_AreaMinor[iMinor];
// We did not add LapAz into Azdot already in PopBackwardOhms.
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
if (TESTACCEL) printf("iVertex %d v0.y %1.9E\n", iVertex, v0.vxy.y);
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
if (TESTACCEL) printf("iVertex %d ohm.beta_yz %1.9E viz %1.9E vez %1.9E effect %1.9E\n",
iVertex,
ohm.beta_xy_z.y,
v.viz,
v.vez,
ohm.beta_xy_z * (v.viz - v.vez));
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (TESTVNY) printf("%d v_n.y %1.9E since ohm %1.9E v.vxy.y %1.9E \n", iMinor, v_n.y,
(ohm.beta_ne + ohm.beta_ni), v.vxy.y);
//
// if (info.flag == CROSSING_INS) {
// f64_vec2 rhat = info.pos / info.pos.modulus();
// v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
// v.vxy -= v.vxy.dot(rhat)*rhat;
// }
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
if (TESTACCEL) printf("CVAA:iVertex %d v_out.y %1.9E\n", iVertex, v.vxy.y);
if (TESTVEZ) printf("%d CVAA vez %1.9E v0 %1.9E Ez %1.9E sigma %1.9E\n", iMinor, v.vez, v0.vez,
Ez_strength, ohm.sigma_e_zz);
sum_Jzdomain[threadIdx.x] = c*FOUR_PI*q*n_use.n*(v.viz - v.vez)*p_AreaMinor[iMinor];
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
sum_Jzreverse[threadIdx.x] = negative_Iz_per_triangle*c*FOUR_PI;
};
}
p_AAzdot_out[iMinor] = temp;
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sum_Jzdomain[threadIdx.x] += sum_Jzdomain[threadIdx.x + k];
sum_LapAz[threadIdx.x] += sum_LapAz[threadIdx.x + k];
sum_Jzreverse[threadIdx.x] += sum_Jzreverse[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sum_Jzdomain[threadIdx.x] += sum_Jzdomain[threadIdx.x + s - 1];
sum_LapAz[threadIdx.x] += sum_LapAz[threadIdx.x + s - 1];
sum_Jzreverse[threadIdx.x] += sum_Jzreverse[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integ_Jz1[blockIdx.x] = sum_Jzdomain[0];
p_integ_Jz2[blockIdx.x] = sum_Jzreverse[0];
p_integ_LapAz[blockIdx.x] = sum_LapAz[0];
}
}*/
/*
__global__ void kernelCalculateVelocityAndAzdot_noadvect_SPIT(
f64 h_use,
structural * p_info_minor,
LONG3 * p_tricornerindex,
f64_vec3 * __restrict__ p_vn0,
v4 * __restrict__ p_v0,
OhmsCoeffs * __restrict__ p_OhmsCoeffs,
AAdot * __restrict__ p_AAzdot_src,
nvals * __restrict__ p_n_minor,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_LapAz, // would it be better just to be loading the Azdot0 relation?
AAdot * __restrict__ p_AAzdot_out,
v4 * __restrict__ p_vie_out,
f64_vec3 * __restrict__ p_vn_out)
{
long iMinor = blockIdx.x*blockDim.x + threadIdx.x;
bool bReport = false;
if (iMinor < BEGINNING_OF_CENTRAL) {
LONG3 tci = p_tricornerindex[iMinor];
if ((tci.i1 == VERTCHOSEN) || (tci.i2 == VERTCHOSEN) || (tci.i3 == VERTCHOSEN))
bReport = true;
}
else {
if (iMinor - BEGINNING_OF_CENTRAL == VERTCHOSEN) bReport = true;
}
structural info = p_info_minor[iMinor];
AAdot temp = p_AAzdot_src[iMinor];
f64 store_Azdot = temp.Azdot;
temp.Azdot += h_use*(c*c*p_LapAz[iMinor]);
// We did not add LapAz into Azdot already in PopBackwardOhms.
f64 store_hccLap = h_use*c*c*p_LapAz[iMinor];
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == DOMAIN_VERTEX)
|| (info.flag == CROSSING_INS) || (info.flag == OUTERMOST))
{
v4 v;
nvals n_use = p_n_minor[iMinor];
OhmsCoeffs ohm = p_OhmsCoeffs[iMinor];
v4 v0 = p_v0[iMinor];
f64_vec3 v_n = p_vn0[iMinor]; // 3 sep
long iVertex = iMinor - BEGINNING_OF_CENTRAL;
v.vez = v0.vez + ohm.sigma_e_zz * Ez_strength; // 2
v.viz = v0.viz + ohm.sigma_i_zz * Ez_strength; // 2
v.vxy = v0.vxy + ohm.beta_xy_z * (v.viz - v.vez); // 4
v_n.x += (ohm.beta_ne + ohm.beta_ni)*v.vxy.x; // 2
v_n.y += (ohm.beta_ne + ohm.beta_ni)*v.vxy.y;
v_n.z += ohm.beta_ne * v.vez + ohm.beta_ni * v.viz;
if (info.flag == CROSSING_INS) {
f64_vec2 rhat = info.pos / info.pos.modulus();
v_n -= Make3((v_n.dotxy(rhat))*rhat, 0.0);
v.vxy -= v.vxy.dot(rhat)*rhat;
}
memcpy(&(p_vie_out[iMinor]), &v, sizeof(v4)); // operator = vs memcpy
p_vn_out[iMinor] = v_n;
if (info.flag == OUTERMOST) {
temp.Azdot = 0.0;
temp.Az = 0.0; // really!
}
else {
// BACKWARD:
temp.Azdot += h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez); // logical for Crossing_INS too
}
if (bReport) printf("%d Azdot_old %1.10E new %1.10E Az %1.10E hccLapAz %1.10E hc4piJz %1.10E n %1.8E vez %1.8E\n", iMinor, store_Azdot, temp.Azdot, temp.Az,
store_hccLap, h_use*c*FOUR_PI*q*n_use.n*(v.viz - v.vez), n_use.n, v.vez);
} else {
memset(&(p_vie_out[iMinor]), 0, sizeof(v4));
memset(&(p_vn_out[iMinor]), 0, sizeof(f64_vec3));
f64 Jz = 0.0;
if (info.flag == REVERSE_JZ_TRI)
{
// Azdotdot = c^2 (Lap Az + 4pi/c Jz)
// ASSUME we are fed Iz_prescribed.
//Jz = -Iz_prescribed / (real)(numEndZCurrentTriangles - numStartZCurrentTriangles);
f64 AreaMinor = p_AreaMinor[iMinor];
Jz = negative_Iz_per_triangle / AreaMinor;
// printf("temp.Azdot %1.10E ", temp.Azdot);
temp.Azdot += h_use*c*FOUR_PI*Jz; // Iz would come from multiplying back by area and adding.
// printf("%d Iz %1.14E Area %1.14E Jz %1.14E Azdot %1.14E \n",
// iMinor,
// negative_Iz_per_triangle, AreaMinor, Jz, temp.Azdot);
};
}
// + h_use * ROCAzdot_antiadvect // == 0
p_AAzdot_out[iMinor] = temp;
// Would rather make this a separate routine beforehand.
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz + data_1.viz
// - data_k.vez - data_1.vez));
//data_1.Azdot = data_k.Azdot
// + h_use * ROCAzdot_antiadvect + h_use * c*c*(Lap_Az +
// 0.5*FOURPI_OVER_C * q*data_use.n*(data_k.viz - data_k.vez));
// intermediate
}*/
__global__ void kernelCreateEpsilonAndJacobi_Heat
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__coeffself_n, // what about dividing by N?
f64 * __restrict__ p__coeffself_i,
f64 * __restrict__ p__coeffself_e,
f64 * __restrict__ p__epsilon_n,
f64 * __restrict__ p__epsilon_i,
f64 * __restrict__ p__epsilon_e,
f64 * __restrict__ p__Jacobi_n,
f64 * __restrict__ p__Jacobi_i,
f64 * __restrict__ p__Jacobi_e ,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
// So this is a lot like saying, let's call the actual routine...
// except we also want Jacobi which means we also want coeff on self in epsilon.
// eps= T_putative - (T_k +- h sum kappa dot grad T_putative)
// coeff on self we want to be linearized so it incorporates the assumption that it affects kappa.
// deps/dT = sum [[dkappa/dT = 0.5 kappa/T] dot grad T + kappa dot d/dT grad T]
// However this means if we know kappa dot grad T then we can * by 0.5/T to get dkappa/dT part
// But we had to collect a separate value for kappa dot d/dT grad T.
// We certainly need to somehow modify the existing kappa dot grad T routine here.
// what about dividing by N?
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if (bUseMask) {
// if (iVertex == VERTCHOSEN) {
// printf("%d : bUseMask %d p_bMaskblock[blockIdx.x] %d blockIdx.x %d\n",
// iVertex, (bUseMask) ? 1 : 0, (p_bMaskblock[blockIdx.x]) ? 1 : 0, blockIdx.x);
// }
if (p_bMaskblock[blockIdx.x] == 0) return;
}
bool bMask[3];
if (bUseMask) {
//memcpy(bMask, p_bMask3 + 3 * iVertex, sizeof(bool) * 3);
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES*2];
// if (iVertex == VERTCHOSEN) {
// printf("%d : bUseMask %d p_bMask[2] %d \n",
// iVertex, (bUseMask) ? 1 : 0, (bMask[2]) ? 1 : 0);
// }
// We need to re-do into species anyway. Afterwards.
if ((bMask[0] == 0) &&
(bMask[1] == 0) &&
(bMask[2] == 0)) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 N = n.n*Area;
f64 Nn = n.n_n*Area;
f64 Tn, Ti, Te, actual_Tn, actual_Ti, actual_Te, epsilon_n, epsilon_i, epsilon_e;
T3 T_k;
memcpy(&T_k, &(p_Tk[iVertex]), sizeof(T3));
if ((bUseMask == 0) || (bMask[0])) {
Tn = p_T_n[iVertex];
actual_Tn = T_k.Tn + (h_sub / Nn)*Rates.NnTn;
epsilon_n = Tn - actual_Tn;
// Try this:
p__Jacobi_n[iVertex] = -(h_sub / sqrt(Nn))*Rates.NnTn / p__coeffself_n[iVertex]; // should never be 0
epsilon_n *= sqrt(Nn);
p__epsilon_n[iVertex] = epsilon_n;
} else {
epsilon_n = 0.0;
actual_Tn = 0.0; // not used
}
if ((bUseMask == 0) || (bMask[1])) {
Ti = p_T_i[iVertex];
actual_Ti = T_k.Ti + (h_sub / N)*Rates.NiTi;
epsilon_i = Ti - actual_Ti;
// Try this:
p__Jacobi_i[iVertex] = -(h_sub / sqrt(N))*Rates.NiTi / p__coeffself_i[iVertex];
// Weighted Least Squares:
epsilon_i *= sqrt(N);
p__epsilon_i[iVertex] = epsilon_i;
} else {
epsilon_i = 0.0;
actual_Ti = 0.0; // not used
};
if ((bUseMask == 0) || (bMask[2])) {
Te = p_T_e[iVertex];
actual_Te = T_k.Te + (h_sub / N)*Rates.NeTe;
epsilon_e = Te - actual_Te;
// Try this:
p__Jacobi_e[iVertex] = -(h_sub / sqrt(N))*Rates.NeTe / p__coeffself_e[iVertex];
epsilon_e *= sqrt(N);
p__epsilon_e[iVertex] = epsilon_e;
//if ((iVertex == VERTCHOSEN))
// printf("iVertex %d Te %1.10E actual_Te %1.9E Tk %1.9E Rates %1.10E epsilon %1.11E Jacobi %1.10E\n",
// iVertex, Te, actual_Te, T_k.Te, Rates.NeTe, epsilon_e, p__Jacobi_e[iVertex]);
} else {
epsilon_e = 0.0;
actual_Te = 0.0; // not used
}
		// If the sqrt(N) we care about is ~1e4 and the T we care about is ~1e-14, then sqrt(N)*T ~ 1e-10 is the absolute floor to add to the threshold.
if (p_bFailedTest != 0) {
if ((epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Tn*actual_Tn*Nn + 1.0e-10*1.0e-10))
||
(epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Ti*actual_Ti*N + 1.0e-10*1.0e-10))
||
(epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Te*actual_Te*N + 1.0e-10*1.0e-10))
)
p_bFailedTest[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
}
//if (p_bFailedTest != 0) {
// if ((epsilon_n*epsilon_n > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Tn*actual_Tn*Nn + 1.0e-30)) ||
// (epsilon_i*epsilon_i > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Ti*actual_Ti*N + 1.0e-30)) ||
// (epsilon_e*epsilon_e > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_Te*actual_Te*N + 1.0e-30)) ||
// (actual_Tn < 0.0) || (actual_Ti < 0.0) || (actual_Te < 0.0))
// p_bFailedTest[blockIdx.x] = true;
//}
		// It may be T<0 that is the problem, given that we have arbitrary strength of B-pull on some edge.
		// 1e-28 = 1e-14 * 1e-14 so that's small. Up to 1e-22 = 1e-9 * 1e-14.
// 1e-8 T (so 1e-16 TT) is comparatively quite large -- just past single precision.
// That seems about right for now.
} else {
p__epsilon_n[iVertex] = 0.0;
p__epsilon_i[iVertex] = 0.0;
p__epsilon_e[iVertex] = 0.0;
p__Jacobi_n[iVertex] = 0.0;
p__Jacobi_i[iVertex] = 0.0;
p__Jacobi_e[iVertex] = 0.0;
};
}
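// Restating the residual and regressor formed above, in equation form (a transcription of the
// code, not a new scheme): for species s with N_s = n_s * AreaMajor,
//
//   eps_s    = sqrt(N_s) * [ T_s - ( T_k,s + (h_sub / N_s) * d(N_s T_s)/dt ) ]
//   Jacobi_s = -(h_sub / sqrt(N_s)) * d(N_s T_s)/dt / coeffself_s
//
// so the residual is weighted by sqrt(N) (weighted least squares), and the per-block failure
// flag fires when eps_s^2 > REL_THRESHOLD_HEAT^2 * (T_actual,s^2 * N_s + 1e-20), a relative
// test with the absolute floor of 1e-10 on sqrt(N)*T discussed in the comments above.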
__global__ void kernelMultiplyVector(
f64 * __restrict__ p_multiply,
f64 const factor)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
p_multiply[iVertex] *= factor;
}
__global__ void kernelCreateEpsilonAndJacobi_Heat_1species
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T,
f64 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__coeffself,
f64 * __restrict__ p__epsilon,
f64 * __restrict__ p__Jacobi,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask,
bool * __restrict__ p_bMaskblock,
bool bUseMask,
int species,
bool bIncorporateEps
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
// So this is a lot like saying, let's call the actual routine...
// except we also want Jacobi which means we also want coeff on self in epsilon.
// eps= T_putative - (T_k +- h sum kappa dot grad T_putative)
// coeff on self we want to be linearized so it incorporates the assumption that it affects kappa.
// deps/dT = sum [[dkappa/dT = 0.5 kappa/T] dot grad T + kappa dot d/dT grad T]
// However this means if we know kappa dot grad T then we can * by 0.5/T to get dkappa/dT part
// But we had to collect a separate value for kappa dot d/dT grad T.
// We certainly need to somehow modify the existing kappa dot grad T routine here.
// what about dividing by N?
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if (TESTHEAT) printf("%d bUseMask %d info.flag %d \n",
iVertex, (bUseMask ? 1 : 0), p_info_major[iVertex].flag);
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
if (bUseMask) {
if (p_bMask[iVertex] == 0) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 T, actual_T, epsilon;
f64 T_k = p_Tk[iVertex];
f64 N = (species != 0)?(n.n*Area) : (n.n_n*Area);
T = p_T[iVertex];
if (species == 0) actual_T = T_k + (h_sub / N)*Rates.NnTn;
if (species == 1) actual_T = T_k + (h_sub / N)*Rates.NiTi;
if (species == 2) actual_T = T_k + (h_sub / N)*Rates.NeTe;
epsilon = T - actual_T;
p__epsilon[iVertex] = epsilon;
if (bIncorporateEps) {
p__Jacobi[iVertex] = -epsilon / p__coeffself[iVertex]; // should never be 0 // match the other function for a minute
} else {
p__Jacobi[iVertex] = -actual_T;
// Try just doing Richardson beyond the 1st regressor.
}
if (TESTHEAT) printf("%d : T %1.10E T_k %1.10E epsilon %1.10E d/dt NiTi %1.10E hsub/N %1.10E coeffself %1.10E Jacobi %1.10E \n",
iVertex, T, T_k, epsilon, Rates.NiTi, h_sub/N, p__coeffself[iVertex], p__Jacobi[iVertex]);
if (p_bFailedTest != 0) {
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T + 1.0e-14*1.0e-14))
p_bFailedTest[blockIdx.x] = true;
			// Here the residual is unweighted (no sqrt(N) factor), so the absolute floor is
			// 1.0e-14 erg on T itself rather than 1.0e-10 on sqrt(N)*T as in the 3-species kernel above.
}
} else {
p__epsilon[iVertex] = 0.0;
p__Jacobi[iVertex] = 0.0;
};
}
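// A minimal, hypothetical sketch of how a regressor such as p__Jacobi above is typically
// applied once a step length beta has been chosen by the caller. The kernel name and the
// choice of beta are assumptions for illustration only; the actual update used elsewhere
// in this solver may differ.
__global__ void kernelExampleAddScaledRegressor(
	f64 * __restrict__ p_T,               // temperature estimate being improved
	f64 const * __restrict__ p_regressor, // e.g. the Jacobi regressor created above
	f64 const beta)                       // step length chosen elsewhere (e.g. by line search)
{
	long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
	p_T[iVertex] += beta * p_regressor[iVertex];
}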
__global__ void kernelCreateEpsilonHeat_1species
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T,
f64 * __restrict__ p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p__epsilon,
bool * __restrict__ p_bFailedTest,
bool * __restrict__ p_bMask,
bool * __restrict__ p_bMaskblock,
bool bUseMask,
int species
)
{
// 2. Calculate epsilon: given the est of T, eps = T - (T_k +- h sum kappa dot grad T)
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
if (bUseMask) {
if (p_bMask[iVertex] == 0) return; // do not modify anything
}
structural info = p_info_major[iVertex];
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 T, actual_T, epsilon;
f64 T_k = p_Tk[iVertex];
f64 N = (species != 0) ? (n.n*Area) : (n.n_n*Area);
T = p_T[iVertex];
if (species == 0) actual_T = T_k + (h_sub / N)*Rates.NnTn;
if (species == 1) actual_T = T_k + (h_sub / N)*Rates.NiTi;
if (species == 2) actual_T = T_k + (h_sub / N)*Rates.NeTe;
#if SQRTNT
epsilon = sqrt(N)*(T - actual_T);
#if TESTHEAT
if (iVertex == VERTCHOSEN)
printf("%d epsilon %1.14E sqrtN %1.10E T %1.12E T_k %1.12E hsub/N %1.12E dbydt{NnTn} %1.12E\n",
iVertex, epsilon, sqrt(N), T, T_k, h_sub / N, Rates.NnTn);
#endif // TESTHEAT
#else
epsilon = T - actual_T;
#endif
p__epsilon[iVertex] = epsilon;
if (p_bFailedTest != 0) {
//if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T*N + 1.0e-10*1.0e-10))
// p_bFailedTest[blockIdx.x] = true;
// Why 1.0e-10 in absolute error, for minimum value we care about:
// N = 2.0e12*7e-5 = 1e8
// root N = 1e4
// root N * 1e-14 erg = 1e-10 for (root N) T
// 2021 YEP self, I agree. But it's 1e-10 per 1e-6s.
#if SQRTNT
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k)*(T-T_k)
+ h_sub*4.0e-4*h_sub*4.0e-4 // absolute deviation from trajectory
+ 1.0e-25*N*T*T_k // floating point error allowance
// Note -- if the last term is negative because T<0 then this almost guarantees fail test.
) {
p_bFailedTest[blockIdx.x] = true;
};
#if TESTHEAT
//if (iVertex == VERTCHOSEN) {
// printf("iVertex %d epsilonsq %1.14E RELPPN %1.9E rel threshold %1.14E\n"
// "absolute threshold %1.14E Floating-point threshold %1.14E threshold total %1.14E \n",
// iVertex, epsilon*epsilon, REL_THRESHOLD_HEAT, REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k),
// h_sub*4.0e-4*h_sub*4.0e-4, // absolute deviation from trajectory
// 1.0e-25*N*T*T_k,
// REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*N*(T - T_k)
// + h_sub*4.0e-4*h_sub*4.0e-4 // absolute deviation from trajectory
// + 1.0e-25*N*T*T_k
// );
//};
#endif
#else
// NO ROOT N INVOLVED:
if (epsilon*epsilon > REL_THRESHOLD_HEAT*REL_THRESHOLD_HEAT*(actual_T*actual_T + 4.0e-14*4.0e-14))
p_bFailedTest[blockIdx.x] = true;
#endif
}
}
else {
p__epsilon[iVertex] = 0.0;
};
}
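// The SQRTNT convergence test above, written out (restating the code): with
// eps = sqrt(N) * (T - actual_T), the per-block failure flag fires when
//
//   eps^2 > REL_THRESHOLD_HEAT^2 * N * (T - T_k)^2      (relative to this substep's change)
//         + (h_sub * 4.0e-4)^2                          (absolute deviation from trajectory)
//         + 1.0e-25 * N * T * T_k                       (floating-point error allowance)
//
// Without SQRTNT, eps = T - actual_T and the test is simply
//   eps^2 > REL_THRESHOLD_HEAT^2 * (actual_T^2 + (4.0e-14)^2).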
__global__ void kernelDivideBySqrtN(
f64 * __restrict__ p_regr,
nvals * __restrict__ p_n,
f64 * __restrict__ p_Area,
int const iSpecies
)
{
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
nvals nnn = p_n[iVertex];
f64 n;
if (iSpecies == 0) {
n = nnn.n_n;
} else {
n = nnn.n;
};
if (n > 0.0) {
f64 N = n*p_Area[iVertex];
p_regr[iVertex] /= sqrt(N);
}
}
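// Interpretation (not asserted elsewhere in this file): with the sqrt(N)-weighted residuals
// above, dividing an array by sqrt(N) maps it from the weighted sqrt(N)*T representation back
// to a direction in T itself, i.e. regr_T = regr_weighted / sqrt(n * Area); multiplying by
// sqrt(N) would be the inverse map.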
__global__ void AggregateSmashMatrix(
f64 * __restrict__ p_Jacobianesque_list,
f64 * __restrict__ p_eps,
f64 * __restrict__ p_smash_matrix_block,
f64 * __restrict__ p_smash_vector_block
) {
__shared__ f64 smash_collect[SQUASH_POINTS*threadsPerTileMajor];
__shared__ f64 Jacobian_data[SQUASH_POINTS*threadsPerTileMajor];
// 1. Load in d eps/ d beta
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
memcpy(&(Jacobian_data[threadIdx.x*SQUASH_POINTS]),
&(p_Jacobianesque_list[iMinor*SQUASH_POINTS]), sizeof(f64)*SQUASH_POINTS);
// 2. Loop:
int j;
#pragma unroll
for (int i = 0; i < SQUASH_POINTS; i++)
{
f64 use = Jacobian_data[threadIdx.x*SQUASH_POINTS + i];
for (j = 0; j < SQUASH_POINTS; j++)
smash_collect[threadIdx.x*SQUASH_POINTS + j] = use*Jacobian_data[threadIdx.x*SQUASH_POINTS + j];
// Now add up row vectors we got:
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
#pragma unroll
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(threadIdx.x + k)*SQUASH_POINTS + y];
};
__syncthreads();
			// Modify for case blockdim not 2^n: fold in the un-paired row, which lives at index (s - 1).
			if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
				for (int y = 0; y < SQUASH_POINTS; y++)
					smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(s - 1)*SQUASH_POINTS + y];
			};
			// e.g. if s == 81 then k == 40: threads 0..39 only reach index 79, so [39] must also pick up row [80].
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
memcpy(&(p_smash_matrix_block[blockIdx.x*SQUASH_POINTS*SQUASH_POINTS + i*SQUASH_POINTS]),
smash_collect, sizeof(f64)*SQUASH_POINTS);
};
// And eps vs deps?
f64 epsilon = p_eps[iMinor];
for (j = 0; j < SQUASH_POINTS; j++)
smash_collect[threadIdx.x*SQUASH_POINTS + j] = epsilon*Jacobian_data[threadIdx.x*SQUASH_POINTS + j];
// Now add up row vectors we got:
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
#pragma unroll
for (int y = 0; y < SQUASH_POINTS; y++)
smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(threadIdx.x + k)*SQUASH_POINTS + y];
};
__syncthreads();
		// Modify for case blockdim not 2^n: fold in the un-paired row, which lives at index (s - 1).
		if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
			for (int y = 0; y < SQUASH_POINTS; y++)
				smash_collect[threadIdx.x*SQUASH_POINTS + y] += smash_collect[(s - 1)*SQUASH_POINTS + y];
		};
		// e.g. if s == 81 then k == 40: threads 0..39 only reach index 79, so [39] must also pick up row [80].
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
memcpy(&(p_smash_vector_block[blockIdx.x*SQUASH_POINTS]), smash_collect, sizeof(f64)*SQUASH_POINTS);
}
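// AggregateSmashMatrix accumulates, per block, M_ij = sum_cells (d eps/d beta_i)(d eps/d beta_j)
// and v_j = sum_cells eps * (d eps/d beta_j) -- presumably the normal-equation pieces for a
// least-squares choice of the SQUASH_POINTS coefficients (the solve itself is not shown here).
// Below is a minimal standalone sketch of the shared-memory block reduction it uses, with the
// odd-s fold written out on its own. kernelExampleBlockSum is a hypothetical name, used only
// for illustration and not called anywhere in this file; it assumes a launch with
// blockDim.x == threadsPerTileMajor.
__global__ void kernelExampleBlockSum(
	f64 const * __restrict__ p_input,
	f64 * __restrict__ p_blocktotal)
{
	__shared__ f64 sum[threadsPerTileMajor];
	long const index = blockDim.x*blockIdx.x + threadIdx.x;
	sum[threadIdx.x] = p_input[index];
	__syncthreads();
	int s = blockDim.x;
	int k = s / 2;
	while (s != 1) {
		if (threadIdx.x < k)
			sum[threadIdx.x] += sum[threadIdx.x + k];
		__syncthreads();
		// If s is odd, element s-1 has no partner above; fold it into the last active slot.
		if ((s % 2 == 1) && (threadIdx.x == k - 1))
			sum[threadIdx.x] += sum[s - 1];
		s = k;
		k = s / 2;
		__syncthreads();
	};
	if (threadIdx.x == 0) p_blocktotal[blockIdx.x] = sum[0];
}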
__global__ void kernelCreateEpsilon_Heat_for_Jacobi
(
f64 const h_sub,
structural * __restrict__ p_info_major,
f64 * __restrict__ p_T_n,
f64 * __restrict__ p_T_i,
f64 * __restrict__ p_T_e,
T3 * p_Tk, // T_k for substep
NTrates * __restrict__ p_NTrates_diffusive,
nvals * __restrict__ p_n_major,
f64 * __restrict__ p_AreaMajor,
f64 * __restrict__ p_eps_n,
f64 * __restrict__ p_eps_i,
f64 * __restrict__ p_eps_e,
bool * __restrict__ p_bMask3,
bool * __restrict__ p_bMaskblock,
bool bUseMask
)
{
if ((bUseMask) && (p_bMaskblock[blockIdx.x] == 0)) return;
long const iVertex = blockDim.x*blockIdx.x + threadIdx.x;
	bool bMask[3] = { false, false, false }; // initialize: bMask[2] is read in the debug printf below even when bUseMask is false
if (bUseMask) {
bMask[0] = p_bMask3[iVertex];
bMask[1] = p_bMask3[iVertex + NUMVERTICES];
bMask[2] = p_bMask3[iVertex + NUMVERTICES * 2];
if ((bMask[0] == 0) && (bMask[1] == 0) && (bMask[2] == 0)) return;
};
structural info = p_info_major[iVertex];
if (iVertex == VERTCHOSEN) printf("%d : bMask[2] %d info.flag %d \n",
iVertex, (bMask[2]) ? 1 : 0, info.flag);
if ((info.flag == DOMAIN_VERTEX)) { //|| (info.flag == OUTERMOST)) {
T3 T_k;
f64 Tn, Ti, Te;
memcpy(&T_k, &(p_Tk[iVertex]), sizeof(T3));
NTrates Rates = p_NTrates_diffusive[iVertex];
nvals n = p_n_major[iVertex];
f64 Area = p_AreaMajor[iVertex];
f64 N = n.n*Area;
f64 Nn = n.n_n*Area;
if ((bUseMask == 0) || (bMask[2])) {
Te = p_T_e[iVertex];
f64 actual_Te = T_k.Te + (h_sub / N)*Rates.NeTe;
f64 epsilon_e = Te - actual_Te;
epsilon_e *= sqrt(N);
p_eps_e[iVertex] = epsilon_e;
// if ((iVertex == VERTCHOSEN)) printf("%d : Te %1.9E actual %1.9E Tk %1.9E rates %1.9E epsilon %1.11E \n",
// iVertex,
// Te, actual_Te, T_k.Te, Rates.NeTe, epsilon_e);
};
if ((bUseMask == 0) || (bMask[0])) {
Tn = p_T_n[iVertex];
f64 actual_Tn = T_k.Tn + (h_sub / Nn)*Rates.NnTn;
f64 epsilon_n = Tn - actual_Tn;
epsilon_n *= sqrt(Nn);
p_eps_n[iVertex] = epsilon_n;
};
if ((bUseMask == 0) || (bMask[1])) {
Ti = p_T_i[iVertex];
f64 actual_Ti = T_k.Ti + (h_sub / N)*Rates.NiTi;
f64 epsilon_i = Ti - actual_Ti;
// Weighted Least Squares:
epsilon_i *= sqrt(N);
p_eps_i[iVertex] = epsilon_i;
// if (iVertex == VERTCHOSEN)
// printf("iVertex %d actual_Ti %1.9E Tk %1.9E Rates %1.10E epsilon %1.10E\n",
// iVertex, actual_Ti, T_k.Ti, Rates.NiTi, epsilon_i);
};
};
// NOT NEEDED: ENSURE WE SET EPS TO 0 FIRST INSTEAD.
// else p_eps_n[iVertex] = 0.0;
// p_eps_i[iVertex] = 0.0;
//p_eps_e[iVertex] = 0.0;
}
__global__ void kernelCreateEpsilonAndJacobi(
f64 const h_use,
structural * __restrict__ p_info,
f64 * __restrict__ p_Az_array_next,
f64 * __restrict__ p_Az_array,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_Lap_Aznext,
f64 * __restrict__ p_epsilon,
f64 * __restrict__ p_Jacobi_x,
bool * __restrict__ p_bFail)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 eps;
structural info = p_info[iMinor];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
eps = 0.0; // p_Lap_Aznext[iMinor];
p_Jacobi_x[iMinor] = 0.0; // -eps / p_LapCoeffSelf[iMinor];
// if (iMinor == 0) printf("\nGPU: eps[0] %1.14E LapCoeffself %1.14E \n", eps, p_LapCoeffSelf[iMinor]);
// but we reset it in ResetFrills called for regressor
}
else {
#ifdef MIDPT_A_AND_ACTUALLY_MIDPT_A_NOT_JUST_EFFECT_ON_AZDOT
// WE COULD CHOOSE to leave it so that Az advances with Azdot_k+1 : we don't know a reason why not.
eps = p_Az_array_next[iMinor] - p_Az_array[iMinor]
- h_use * p_gamma[iMinor] * p_Lap_Aznext[iMinor]
- h_use * p_Azdot0[iMinor];
p_Jacobi_x[iMinor] = -eps / (1.0 - h_use * p_gamma[iMinor] * p_LapCoeffSelf[iMinor]);
#else
f64 Aznext = p_Az_array_next[iMinor];
f64 gamma = p_gamma[iMinor];
eps = Aznext - p_Az_array[iMinor] - h_use * gamma * p_Lap_Aznext[iMinor] - h_use*p_Azdot0[iMinor];
// if (iMinor == VERTCHOSEN + BEGINNING_OF_CENTRAL) {
// printf("iMinor %d eps %1.9E Aznext %1.9E Azk? %1.9E h_use %1.9E gamma %1.9E LapAz %1.9E Azdot0 %1.9E\n",
// iMinor, eps, Aznext, p_Az_array[iMinor], h_use, gamma, p_Lap_Aznext[iMinor], p_Azdot0[iMinor]);
// }
p_Jacobi_x[iMinor] = -eps / (1.0 - h_use * gamma * p_LapCoeffSelf[iMinor]);
if (p_Jacobi_x[iMinor] != p_Jacobi_x[iMinor]) printf("p_Jacobi_x[%d] was NaN : eps %1.9E gamma %1.9E LCS %1.9E LapAznext %1.9E Azdot0 %1.9E Aznext %1.9E\n",
iMinor, eps, gamma, p_LapCoeffSelf[iMinor], p_Lap_Aznext[iMinor], p_Azdot0[iMinor], Aznext);
// if (iMinor == 32641) printf("32641: eps %1.9E Az %1.12E Azk %1.12E h %1.10E gamma %1.10E LapAz %1.12E "
// "h Azdot0 %1.10E\n",
// eps, p_Az_array_next[iMinor], p_Az_array[iMinor],
// h_use,gamma,
// p_Lap_Aznext[iMinor],
// h_use*p_Azdot0[iMinor]);
#endif
// if (iMinor == 25526) printf("\n\n########\nJacobi_x 25526 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// if (iMinor == 86412) printf("Jacobi_x 86412 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// if (iMinor == 69531) printf("Jacobi_x 69531 GPU: %1.14E eps %1.14E gamma %1.14E LapCoeffself %1.14E\n",
// p_Jacobi_x[iMinor], eps, p_gamma[iMinor], p_LapCoeffSelf[iMinor]);
// Typical value for Az is like 100+ so use 0.1 as minimum that we care about, times relthresh.
if (eps*eps > RELTHRESH_AZ_d*RELTHRESH_AZ_d*(Aznext*Aznext + 10.0*10.0)) p_bFail[blockIdx.x] = true;
// This does not seem to be triggering.
};
p_epsilon[iMinor] = eps;
}
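// Restating the residual and Jacobi step formed above (a transcription of the code):
//
//   eps      = Az_next - Az_k - h_use * gamma * Lap(Az_next) - h_use * Azdot0
//   Jacobi_x = -eps / (1 - h_use * gamma * LapCoeffSelf)
//
// and the per-block failure flag fires when eps^2 > RELTHRESH_AZ_d^2 * (Az_next^2 + 10.0^2),
// i.e. a relative test with the 10.0 absolute floor on Az_next.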
__global__ void kernelCreateExplicitStepAz(
f64 const hsub,
f64 * __restrict__ pAzdot0,
f64 * __restrict__ pgamma,
f64 * __restrict__ pLapAz, // we based this off of half-time Az.
f64 * __restrict__ p_result) // = h (Azdot0 + gamma*LapAz)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
p_result[iMinor] = hsub*(pAzdot0[iMinor] + pgamma[iMinor] * pLapAz[iMinor]);
}
__global__ void kernelCreateEpsilon_Az_CG(
f64 const h_use,
structural * __restrict__ p_info,
f64 * __restrict__ p_Az_plus,
f64 * __restrict__ p_Az_k,
f64 * __restrict__ p_Azdot0,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_Lap_Az,
f64 * __restrict__ p_epsilon,
f64 * __restrict__ p__sqrtfactor,
bool * __restrict__ p_bFail,
bool const bSaveFail)
{
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
f64 eps;
structural info = p_info[iMinor];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
eps = 0.0; // p_Lap_Aznext[iMinor];
}
else {
// WE COULD CHOOSE to leave it so that Az advances with Azdot_k+1 : we don't know a reason why not.
f64 sqrtfactor = p__sqrtfactor[iMinor];
f64 one_over_sqrt;
if (sqrtfactor != 0.0) {
one_over_sqrt = 1.0 / sqrtfactor;
} else {
one_over_sqrt = 1.0;
};
f64 Aznext = p_Az_plus[iMinor];
eps = one_over_sqrt*(Aznext - p_Az_k[iMinor] - h_use * p_Azdot0[iMinor])
- sqrtfactor * p_Lap_Az[iMinor]; // notice this is the integrated Lap //
// eps^2 sqrtfactor sqrtfactor = original eps squared.
if (bSaveFail)
if (eps*eps*sqrtfactor*sqrtfactor > 1.0e-10*1.0e-10*(Aznext*Aznext + 1.0*1.0))
p_bFail[blockIdx.x] = true;
// An optimization is probably to store values in shared then amalgamate, send data to global on 1 thread. ?
if (eps != eps) printf("iMinor %d eps %1.9E Aznext %1.9E gamma %1.9E sqrtfactor %1.9E over %1.9E info.flag %d LapAz %1.9E Azdot0 %1.9E\n",
iMinor, eps, Aznext, p_gamma[iMinor], sqrtfactor, one_over_sqrt, info.flag, p_Lap_Az[iMinor], p_Azdot0[iMinor]);
};
p_epsilon[iMinor] = eps;
}
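// In the CG version above the residual is rescaled by f = p__sqrtfactor (restating the code):
//
//   eps = (Az_plus - Az_k - h_use * Azdot0) / f  -  f * LapAz_integrated
//
// so that, as the comment in the kernel notes, (f * eps)^2 recovers the square of the unscaled
// residual, and the failure test compares (f * eps)^2 against 1e-20 * (Az_plus^2 + 1).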
__global__ void kernelSetZero(
f64 * __restrict__ data
) {
long const index = blockDim.x*blockIdx.x + threadIdx.x;
data[index] = 0.0;
}
__global__ void kernelCreate_further_regressor(
structural * __restrict__ p_info,
f64 h_use,
f64 * __restrict__ p_regressor,
f64 * __restrict__ p_Lap_regressor,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_gamma,
f64 * __restrict__ p_regressor2)
{
long const index = blockDim.x*blockIdx.x + threadIdx.x;
/*
f64 d_eps_by_d_beta;
structural info = p_info[index];
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL))
{
d_eps_by_d_beta = 0.0; // Lap_Jacobi[iMinor]; // try ignoring
// Need to fill this in afterwards by ResetFrills?
}
else {
d_eps_by_d_beta = (p_regressor[index] - h_use * p_gamma[index] * p_Lap_regressor[index]);
};
p_regressor2[index] = d_eps_by_d_beta / (1.0 - h_use * p_gamma[index] * p_LapCoeffSelf[index]);*/
// Try just this instead:
p_regressor2[index] = p_gamma[index] * p_Lap_regressor[index]; // d_eps_by_d_beta / (1.0 - h_use * p_gamma[index] * p_LapCoeffSelf[index]);
}
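// Restating the recursion above: each further regressor is regressor_{m+1} = gamma * Lap(regressor_m)
// (the commented-out alternative instead formed d eps / d beta divided by its diagonal term).
// Interpretation, not asserted elsewhere in this file: successive regressors then probe repeated
// applications of gamma*Lap, i.e. a Krylov-style basis for the Az solve.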
__global__ void kernelGetLap_minor(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos, integ_grad_Az;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 store_centroid = opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
if (TESTLAP) printf("vertex %d endpt0 %1.9E %1.9E projendpt0 %1.9E %1.9E \n",
iVertex, endpt0.x, endpt0.y, projendpt0.x, projendpt0.y);
if (TESTLAP) printf("%d Innermost: AreaMinor += %1.10E AreaMinor %1.10E \n",
iVertex, (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x, AreaMinor);
};
// if (info.flag == OUTERMOST) {
// printf("DEBUG: iVertex %d info.neigh_len %d iend %d izTri[0] %d izTri[iend-1] %d izTri[iend-2] %d "
// "flags 0 %d 1 %d 2 %d 3 %d 4 %d 5 %d\n"
// "positions 01234 (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// iend, izTri[0], izTri[iend - 1],
// izTri[iend - 2],
// p_info[izTri[0]].flag, p_info[izTri[1]].flag, p_info[izTri[2]].flag,
// p_info[izTri[3]].flag, p_info[izTri[4]].flag,
// p_info[izTri[0]].pos.x, p_info[izTri[0]].pos.y, p_info[izTri[1]].pos.x, p_info[izTri[1]].pos.y,
// p_info[izTri[2]].pos.x, p_info[izTri[2]].pos.y, p_info[izTri[3]].pos.x, p_info[izTri[3]].pos.y,
// p_info[izTri[4]].pos.x, p_info[izTri[4]].pos.y
// );
//
// if (DIRICHLET == false) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// f64 radius = info.pos.modulus();
// endpt0.project_to_radius(projendpt0,
// 0.5*(FRILL_CENTROID_OUTER_RADIUS_d + radius)); // back of cell for Lap purposes
// // flatten the cell to get wall halfway out to 0 line.
// }
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// We should always call ResetFrillsAz first on the argument, so that if next is a frill then
// we got the correct value as nextAz.
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Indicates we think 1 is anticlockwise from 0. For OUTERMOST, it's pointing IN so the rest must do too, then we divide out the minus.
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//if (TESTLAP) printf("vertex %d endpt0 %1.9E %1.9E endpt1 %1.9E %1.9E Area += %1.10E edge_normal.x %1.9E\n",
// iVertex, endpt0.x, endpt0.y, endpt1.x, endpt1.y,
// (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x, edge_normal.x);
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E prevAz %1.8E nextAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E Area_quad %1.8E\n",
iVertex, i, izTri[i],
ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral,
area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
iprev = i;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
if (TESTLAP) printf("vertex %d endpt1 %1.9E %1.9E projendpt1 %1.9E %1.9E \n",
iVertex, endpt1.x, endpt1.y, projendpt1.x, projendpt1.y);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTLAP) printf("vertex %d Innermost: AreaMinor += %1.10E \n",
iVertex, (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x);
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
if (TESTLAP) printf(" vertex %d Innermost: AreaMinor += %1.10E AreaMinor %1.10E \n",
iVertex, (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x,
AreaMinor);
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
oppAz = 0.0;
nextAz = 0.0;
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
oppAz = prevAz*(prevpos.modulus() / opppos.modulus());
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
};
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// NOW WE ARE GOING TO LOOK OUTWARDS
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
nextAz = 0.0;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE) {
//This was incorrect
nextAz = p_Az[izTri[0]]*(store_centroid.modulus()/nextpos.modulus());
}
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc.
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
nextAz = p_Az[izTri[0]];
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d ourAz %1.8E oppAz %1.8E prev %1.8E next %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E edgenormal %1.8E %1.8E\n",
iVertex, ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral, edge_normal.x, edge_normal.y);
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
if (TESTLAP) printf("LapAz_integ %1.10E AreaMinor %1.10E LapAz %1.10E \n", Our_integral_Lap_Az, AreaMinor,
Our_integral_Lap_Az / AreaMinor);
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
// p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
prevAz = ourAz*(info.pos.modulus() / prevpos.modulus());
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.999999*0.999999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET) ||
(RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
{
// neighbour's not a frill, or it's Dirichlet or radial decline looking outwards.
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if ((TESTLAP2) || (Our_integral_Lap_Az != Our_integral_Lap_Az)) {
printf("iMinor %d [i] %d ourAz %1.9E theirs %1.9E prev %1.9E next %1.9E numer %1.9E contrib %1.10E areaquad %1.8E\n",
iMinor, izNeighMinor[i], ourAz, oppAz, prevAz, nextAz,
integ_grad_Az.dot(edge_normal),
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
area_quadrilateral);
};
}
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
// p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
}
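// The Laplacian above is assembled by Green's theorem over each minor cell (restating the code):
//
//   Lap Az  ~=  (1/AreaMinor) * sum_over_edges  grad Az . edge_normal
//
// where on each edge the gradient is estimated over the quadrilateral with vertices
// (info.pos, prevpos, opppos, nextpos) as
//
//   grad Az         = integ_grad_Az / area_quadrilateral
//   integ_grad_Az.x =  0.5 * sum over the quad's sides of (Az_1 + Az_2)*(y_1 - y_2)
//   integ_grad_Az.y = -0.5 * sum over the quad's sides of (Az_1 + Az_2)*(x_1 - x_2)
//
// i.e. a contour-integral (shoelace-style) estimate of the average gradient over the quad,
// which is why the AreaMinor division at the end breaks the symmetry that CG would need.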
__global__ void kernelGetLap_minor_SYMMETRIC(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor, // need to save off to multiply back for symmetry
bool const bDivideByArea
)
{
// Symmetric version with circumcenters to define corners of minor cells
// so as to use conjugate gradient for Az.
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos, integ_grad_Az;
f64_vec2 endpt0, endpt1;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
printf("don't call this routine unless the mess is reformed so that drawing triangles between these points will actually produce a Delaunay triangulation. Because if it doesn't, circumcenters aren't in triangles and it will be nonsense.\n");
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
// prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
// prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 store_centroid = opppos;
// endpt0 = THIRD * (info.pos + opppos + prevpos);
CalculateCircumcenter(&endpt0, info.pos, opppos, prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
// if (info.flag == OUTERMOST) {
// printf("DEBUG: iVertex %d info.neigh_len %d iend %d izTri[0] %d izTri[iend-1] %d izTri[iend-2] %d "
// "flags 0 %d 1 %d 2 %d 3 %d 4 %d 5 %d\n"
// "positions 01234 (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) (%1.8E, %1.8E) \n"
// , iVertex, info.neigh_len,
// iend, izTri[0], izTri[iend - 1],
// izTri[iend - 2],
// p_info[izTri[0]].flag, p_info[izTri[1]].flag, p_info[izTri[2]].flag,
// p_info[izTri[3]].flag, p_info[izTri[4]].flag,
// p_info[izTri[0]].pos.x, p_info[izTri[0]].pos.y, p_info[izTri[1]].pos.x, p_info[izTri[1]].pos.y,
// p_info[izTri[2]].pos.x, p_info[izTri[2]].pos.y, p_info[izTri[3]].pos.x, p_info[izTri[3]].pos.y,
// p_info[izTri[4]].pos.x, p_info[izTri[4]].pos.y
// );
//
// if (DIRICHLET == false) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// f64 radius = info.pos.modulus();
// endpt0.project_to_radius(projendpt0,
// 0.5*(FRILL_CENTROID_OUTER_RADIUS_d + radius)); // back of cell for Lap purposes
// // flatten the cell to get wall halfway out to 0 line.
// }
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Symmetric, with circumcenters:
// normal_gradient = (oppAz - ourAz) / ((opppos - info.pos).modulus());
// Our_integral_Lap_Az += normal_gradient*edge_normal.modulus();
// Reduce number of square roots:
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
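			// (The line above is algebraically the same as the two-sqrt form in the comment:
			//   (oppAz - ourAz)/|opppos - info.pos| * |edge_normal|
			//     = (oppAz - ourAz) * sqrt( |edge_normal|^2 / |opppos - info.pos|^2 ),
			// so only one sqrt is evaluated.)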
if (Our_integral_Lap_Az != Our_integral_Lap_Az) printf("%d oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
iMinor, oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E delta_out %1.8E delta_edge %1.8E\n",
iVertex, i, izTri[i],
ourAz, oppAz,
(opppos - info.pos).modulus(),
edge_normal.modulus()
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
iprev = i;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
if (!RADIALDECLINE) {
oppAz = 0.0;
nextAz = 0.0;
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
}
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d-info.pos.modulus())*1.16 );
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
oppAz = prevAz*(prevpos.modulus() / opppos.modulus());
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
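			// (RADIALDECLINE extrapolates Az beyond the mesh as declining like 1/r:
			// a point projected from radius r out to radius r' is assigned Az*(r/r'),
			// which is what the two scalings above implement.)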
// nextpos directly above our own but only on a level with the other frill centroids
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (DIRICHLET || RADIALDECLINE)
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
// "map radially inwards so that radius is halfway out to the zero arc:"
// no can do... nor should we need to since the edge is equidistant from both points that generated it.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
nextAz = 0.0;
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos); // THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE)
nextAz = p_Az[izTri[0]] * (store_centroid.modulus() / nextpos.modulus());
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
if (DIRICHLET || (RADIALDECLINE))
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
// nextAz = p_Az[izTri[0]]; // cancelled because nextAz is not used
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
if (DIRICHLET || (RADIALDECLINE))
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
if (Our_integral_Lap_Az != Our_integral_Lap_Az) printf(" at dirichlet oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
		// But this points up why CG doesn't converge properly: the presence of the AreaMinor factor
		// makes the equations non-symmetric.
		// MULTIPLY!! (i.e. multiply through by area rather than dividing by it.)
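		// Sketch of the symmetry point: the edge contribution (oppAz - ourAz)*|edge|/|delta|
		// uses the same geometric coefficient |edge|/|delta| seen from either side of the edge,
		// so the matrix of *integrated* Laplacian values is (intended to be) symmetric.
		// Dividing row i by AreaMinor_i destroys that symmetry unless CG is run in the
		// area-weighted inner product -- hence the bDivideByArea switch below.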
if (bDivideByArea) Our_integral_Lap_Az /= AreaMinor;
		p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az; // not divided by AreaMinor here --
		// the division is applied above only when bDivideByArea is set.
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
if (AreaMinor < 0.0) printf("iVertex %d : AreaMinor %1.10E \n", iVertex, AreaMinor);
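		// (AreaMinor is accumulated as the contour integral of x dy, i.e. 0.5*(x0+x1)*(y1-y0)
		// summed around the cell; a negative total generally means the circumcenters were
		// traversed clockwise or the cell polygon is inverted -- another symptom of a
		// non-Delaunay configuration.)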
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
		// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
		// Rethink: izNeighMinor[3] is the one meant to be neighbour 0.
		// But none of that matters here: for frills we simply set Lap Az = 0.
p_LapAz[iMinor] = 0.0;
} else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
// prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
// prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
}
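		// (The 0.99999 factor is a small tolerance: any neighbour whose position lies at 99.999%
		// of FRILL_CENTROID_OUTER_RADIUS_d or beyond is treated as an outer frill.)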
CalculateCircumcenter(&endpt0, info.pos, opppos, prevpos);
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
}
CalculateCircumcenter(&endpt1, info.pos, opppos, nextpos);
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
//f64_vec2 integ_grad_Az;
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
//f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET) || (RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
{
				// The neighbour is not an outer frill -- or it is, but we apply Dirichlet or radial-decline
				// conditions looking outwards -- and it is not an inner frill.
// Symmetric, with circumcenters:
// normal_gradient = (oppAz - ourAz) / ((opppos - info.pos).modulus());
// Our_integral_Lap_Az += normal_gradient*edge_normal.modulus();
// Reduce number of square roots:
Our_integral_Lap_Az += (oppAz - ourAz)*sqrt((edge_normal.dot(edge_normal)) / ((opppos - info.pos).dot(opppos - info.pos)));
if (Our_integral_Lap_Az != Our_integral_Lap_Az)
printf("oppAz %1.8E ourAz %1.8E edge_normal.dot(en) %1.8E opposdot %1.8E \n",
oppAz, ourAz, edge_normal.dot(edge_normal), ((opppos - info.pos).dot(opppos - info.pos)));
				// Is there a cunning way to get rid of the sqrt? We know that edge_normal faces the same way as (opppos - info.pos)...
				// thus (opppos - info.pos).dot(edge_normal) equals the product of their moduli. Hmmmm.
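				// A possible sqrt-free form (untested sketch, relying on edge_normal pointing the
				// same way as (opppos - info.pos)): |edge_normal|/|opppos - info.pos|
				//   = edge_normal.dot(edge_normal) / edge_normal.dot(opppos - info.pos),
				// so the contribution could be written as
				//   (oppAz - ourAz) * edge_normal.dot(edge_normal) / edge_normal.dot(opppos - info.pos).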
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//if (iMinor == 57364) printf("%d AreaMinor %1.8E contrib %1.8E, endpt0.x %1.9E endpt0.y %1.9E endpt1.x %1.9E endpt1.y %1.9E edge.x %1.8E info.pos %1.9E %1.9E oppos %1.9E %1.9E\n",
// iMinor, AreaMinor, (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x,
// endpt0.x, endpt0.y, endpt1.x, endpt1.y, edge_normal.x,
// info.pos.x, info.pos.y, opppos.x, opppos.y);
endpt0 = endpt1;
// prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
if (bDivideByArea) Our_integral_Lap_Az /= AreaMinor;
p_LapAz[iMinor] = Our_integral_Lap_Az;// / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep // careful what we pass it
if (AreaMinor < 0.0) printf("%d : AreaMinor %1.10E \n", iMinor, AreaMinor);
};
}
/*__global__ void kernelGetLap_minor__sum(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
} else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E\n"
"pos %1.9E %1.9E prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpost %1.9E %1.9E\n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y
);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
} else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d-info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
} else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
} else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d i %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n"
"ourpos %1.9E %1.9E prev %1.9E %1.9E out %1.9E %1.9E nex %1.9E %1.9E"
"PBC %d \n",
iMinor, i, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
szPBC[i]);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
	// In case s == 81 (odd): thread 39 also adds [80], i.e. [39] += [80].
	// Otherwise the pairwise stage only reaches [39] += [79].
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}
*/
/*
__global__ void kernelGetLap_minor__sum_placecontribs(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT,
f64 * __restrict__ p_contriblist
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long izneighminorneigh[6];
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
memcpy(izneighminorneigh, p_izNeighMinor + 6 * izTri[i], sizeof(long) * 6);
int j = 0;
while ((j < 6) && (izneighminorneigh[j] != iVertex + BEGINNING_OF_CENTRAL)) j++;
if (j == 6) {
printf("ERROR ERROR ERROR %d %d \n", iVertex, izTri[i]);
}
else {
p_contriblist[izTri[i] * 6 + j] = integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E \n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
}
else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n",
iMinor, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
	// In case s == 81 (odd): thread 39 also adds [80], i.e. [39] += [80].
	// Otherwise the pairwise stage only reaches [39] += [79].
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}*/
/*
__global__ void kernelGetLap_minor__sum_detectcontribs(
structural * __restrict__ p_info,
f64 * __restrict__ p_Az,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ p_AreaMinor,
f64 * __restrict__ p_integralLapAz,
f64 * __restrict__ p_integralVT,
f64 * __restrict__ p_integralTV,
f64 * __restrict__ p_integralTT,
f64 * __restrict__ p_contriblist
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 integralLapAz[threadsPerTileMinor];
__shared__ f64 integralLapVT[threadsPerTileMinor];
__shared__ f64 integralLapTV[threadsPerTileMinor];
__shared__ f64 integralLapTT[threadsPerTileMinor];
// __shared__ f64 sum1[threadsPerTileMinor];
// __shared__ f64 sum2[threadsPerTileMinor];
// __shared__ f64 sum3[threadsPerTileMinor];
// 4.5 per thread.
// Not clear if better off with L1 or shared mem in this case?? Probably shared mem.
// For now, stick with idea that vertices have just major indices that come after tris.
// Minor indices are not made contiguous - although it might be better ultimately.
integralLapAz[threadIdx.x] = 0.0;
integralLapVT[threadIdx.x] = 0.0;
integralLapTV[threadIdx.x] = 0.0;
integralLapTT[threadIdx.x] = 0.0;
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
long izneighminorneigh[6];
shared_pos[threadIdx.x] = p_info[iMinor].pos;
shared_Az[threadIdx.x] = p_Az[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Az_verts[threadIdx.x] = p_Az[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this best way? better than going looking for periodic data on each tri.
ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevAz = p_Az[izTri[iprev]];
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppAz = p_Az[izTri[i]];
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
short inext, iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2; // We ignore frills -- how is this okay?
// It's OK for flat BC - just ignore frills always
#ifndef FLATAZBC
printf("not ok\n");
#endif
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
};
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextAz = p_Az[izTri[inext]];
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal, integ_grad_Az;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapVT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (TESTLAP) printf("iVertex %d izTri[%d] %d ourAz %1.8E oppAz %1.8E contrib %1.14E "
"grad Az %1.9E %1.9E \n",
iVertex, i, izTri[i],
ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral,
integ_grad_Az.x / area_quadrilateral,
integ_grad_Az.y / area_quadrilateral);
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevAz = oppAz;
oppAz = nextAz;
++iprev;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
// Not sure this AreaMinor is the right one.
// But this points up why CG doesn't roll properly. The presence of the AreaMinor factor makes
// the equations not symmetric.
// NEW ADDITION:
#ifdef RADIALDECLINEAZBC
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / FRILL_CENTROID_OUTER_RADIUS_d;
}
#endif
#ifdef DIRICHLETAZBC
// In this case let the value beyond OUTERMOST be taken as 0 for now;
// we can then substitute the value of the 1D radial array if we can use that.
if (info.flag == OUTERMOST)
{
// set outer value at Az = (r_here/r_outer)*Az_self
// contrib to integral of Lap is [delta_edge * dAz/dr] =
// delta_edge*Az_self * (r here - r outer)/(r_outer^2 - r_outer*r_here)
// = delta_edge*Az_self * (-1.0)/(r_outer)
f64 delta_edge = (projendpt0 - projendpt1).modulus();
Our_integral_Lap_Az -= delta_edge*ourAz / (2.0*(FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus()));
}
#endif
// if it's flat do nothing
};
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor; // reset just because otherwise we're inconsistent about area/position in a subcycle
}; // was thread in the first half of the block
info = p_info[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// izNeighMinor[0] is actually vertex 0 if you are triangle 0.
// Rethink:
// Try izNeighMinor[3] because this is meant to be neighbour 0.
// Why are we doing all this? Just set = 0 out here.
p_LapAz[iMinor] = 0.0;
}
else {
f64 Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevAz = p_Az[izNeighMinor[iprev]];
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
oppAz = p_Az[izNeighMinor[i]];
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextAz = p_Az[izNeighMinor[inext]];
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
// This shouldn't be necessary anyway but is especially no good if it's not meant to be flat
#ifndef FLATAZBC
printf("This bit needs to change.\n");
#endif
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// neighbour's not a frill
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
integralLapAz[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (izNeighMinor[i] >= BEGINNING_OF_CENTRAL) {
integralLapTV[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 contrib = integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 alleged = p_contriblist[iMinor * 6 + i];
f64 sum = (alleged + contrib);
if (fabs(sum) > 1.0e-4) printf("%d from %d : contrib %1.14E alleged %1.14E sum %1.8E\n",
iMinor, izNeighMinor[i], contrib, alleged, sum);
}
else {
integralLapTT[threadIdx.x] += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
};
if (TESTTRI3) {
printf("iMinor %d izNeighMinor[i] %d ourAz %1.9E theirs %1.9E contrib %1.12E \n",
iMinor, izNeighMinor[i], ourAz, oppAz,
integ_grad_Az.dot(edge_normal) / area_quadrilateral);
};
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevAz = oppAz;
oppAz = nextAz;
prevpos = opppos;
opppos = nextpos;
};
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_AreaMinor[iMinor] = AreaMinor; // reset for each substep
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + k];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + k];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + k];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
integralLapAz[threadIdx.x] += integralLapAz[threadIdx.x + s - 1];
integralLapVT[threadIdx.x] += integralLapVT[threadIdx.x + s - 1];
integralLapTV[threadIdx.x] += integralLapTV[threadIdx.x + s - 1];
integralLapTT[threadIdx.x] += integralLapTT[threadIdx.x + s - 1];
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_integralLapAz[blockIdx.x] = integralLapAz[0];
p_integralVT[blockIdx.x] = integralLapVT[0];
p_integralTV[blockIdx.x] = integralLapTV[0];
p_integralTT[blockIdx.x] = integralLapTT[0];
}
}*/
__global__ void kernelComputeJacobianValues(
structural * __restrict__ p_info,
// f64 * __restrict__ p_Aznext,
// f64 * __restrict__ p_Azk,
// f64 * __restrict__ pAzdot0,
f64 * __restrict__ pgamma,
f64 const h_use,
long * __restrict__ p_indic,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_Jacobianesque_list)
{
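// For each minor cell, this kernel builds one row of the "Jacobianesque" matrix used by the
// squash solver: for the SQUASH_POINTS unknowns selected by p_indic (volley numbers
// 1..SQUASH_POINTS), it accumulates d(-h_use*gamma*Lap Az)/d beta_j over the cell, adds the
// identity term for the cell's own volley, and writes the SQUASH_POINTS coefficients to
// p_Jacobianesque_list. The first threadsPerTileMajor threads also handle the tile's
// vertex-centred cells; every thread then handles its 6-neighbour minor cell iMinor.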
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor]; // 16K for these - plentiful
// __shared__ f64 shared_Az[threadsPerTileMinor];
// __shared__ f64 shared_Az_verts[threadsPerTileMajor]; // 4.5 things
f64 d_eps_by_dbeta_j[SQUASH_POINTS]; // 24 max
// __shared__ f64 d_eps_by_dbeta_j_verts[SQUASH_POINTS*threadsPerTileMajor];
// need to acquire sums of products of these so need 1 for every tri and vertex
// ...
// would have been better off by far not trying to preserve data, but simply splitting into 2 routines.
// Maybe we should be accumulating in-between instead. Better.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info[iMinor].pos;
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
int iWhich, j;
int iWhichPrev, iWhichSelf, iWhichNext, iWhichOpp;
if (threadIdx.x < threadsPerTileMajor) {
iWhichSelf = p_indic[iVertex + BEGINNING_OF_CENTRAL];
memset(d_eps_by_dbeta_j, 0, sizeof(f64)*SQUASH_POINTS);
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
f64_vec2 endpt0, endpt1;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this the best way? Better than going looking for periodic data on each tri.
// ourAz = shared_Az_verts[threadIdx.x];
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
iWhichPrev = p_indic[izTri[iprev]];
short i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
iWhichOpp = p_indic[izTri[i]];
// Handle case that prev is a frill. What to do then?
// Not sure which way the numbers go. But either way, if prev is a frill then it's our 0th tri that is the governor.
f64 prevfactor = 1.0;
f64 nextfactor;
if ((info.flag == INNERMOST) &&
(prevpos.dot(prevpos) < 1.0000001*1.0000001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
iWhichPrev = iWhichOpp;
};
if ((info.flag == OUTERMOST) && (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
iWhichPrev = iWhichOpp;
prevfactor = (opppos.modulus() / prevpos.modulus());
};
f64_vec2 store_centroid = opppos;
endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short inext, iend = tri_len;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST))
iend = tri_len - 2;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
if (iend > MAXNEIGH) printf("####################\nvertex %d iend = %d info.neigh_len = %d\n", iVertex, iend, info.neigh_len);
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
iWhichNext = p_indic[izTri[inext]];
nextfactor = 1.0;
if ((info.flag == INNERMOST) &&
(nextpos.dot(nextpos) < 1.0000001*1.0000001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
iWhichNext = iWhichOpp;
};
if ((info.flag == OUTERMOST) &&
(nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
iWhichNext = iWhichOpp;
nextfactor = (opppos.modulus() / nextpos.modulus());
};
endpt1 = THIRD * (nextpos + info.pos + opppos);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev-1] += prevfactor*0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
// Just add the unnormalized contribution here
}
if (iWhichOpp > 0) {
d_eps_by_dbeta_j[iWhichOpp-1] += 0.5*((nextpos.y-prevpos.y)*edge_normal.x
- (nextpos.x-prevpos.x)*edge_normal.y) / area_quadrilateral;
// Just add the unnormalized contribution here
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext-1] += nextfactor*0.5*((info.pos.y-opppos.y)*edge_normal.x
+ (opppos.x-info.pos.x)*edge_normal.y) / area_quadrilateral;
// Just add the unnormalized contribution here
}
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += 0.5*((prevpos.y-nextpos.y)*edge_normal.x
- (prevpos.x-nextpos.x)*edge_normal.y) / area_quadrilateral;
// Just add the unnormalized contribution here
}
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
iWhichPrev = iWhichOpp;
iWhichOpp = iWhichNext;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
f64 opp_prev = 0.0, next_ours = 0.0;
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
opp_prev = (prevpos.modulus() / opppos.modulus());
next_ours = (info.pos.modulus() / nextpos.modulus());
};
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) {
// iWhichPrev is already set to previous iWhichOpp.
// hold on to it
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1]
+= (0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y)
+ 0.5*opp_prev*(
(nextpos.y-prevpos.y)*edge_normal.x
-(nextpos.x-prevpos.x)*edge_normal.y
)
)/ area_quadrilateral;
}
//iWhichSelf = p_indic[iVertex + BEGINNING_OF_CENTRAL]; // already set.
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += (0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y)
+ 0.5*next_ours*(
(info.pos.y-opppos.y)*edge_normal.x
- (info.pos.x-opppos.x)*edge_normal.y
)
)/ area_quadrilateral;
}
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
// NOW WE ARE GOING TO LOOK OUTWARDS
// iprev IS NOT UPDATED
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
f64 next_0 = 0.0;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (RADIALDECLINE) {
next_0 = (store_centroid.modulus() / nextpos.modulus());
}
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
//
// So now ... opp_prev is still there and must attribute the prev coefficient
// to the prev-1 index
//
// next_ours is now opp, and must attribute the opp coefficient to ourselves
//
// and the next coefficient now applies for index 0 with coeff next_0
// We have not updated iprev.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
// WE ARE NOW LOOKING DIRECTLY OUTWARDS.
// "iprev" is now the previous one to prev and still relevant in case of radial decline
// next_ours is now for the opposite one
// next_0 relates the next position to the effect of the 0th value.
if (RADIALDECLINE) {
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1] +=
0.5*opp_prev*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] +=
0.5*(1.0-next_ours)*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
iWhichNext = p_indic[izTri[0]];
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] +=
0.5*next_0*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
};
if (DIRICHLET) {
// May be nonsense.
iWhich = p_indic[izTri[iprev]]; //frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[izTri[inext]]; // frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[iVertex + BEGINNING_OF_CENTRAL];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
// WE ARE GOING TO LOOK NORTHEAST
endpt1 = store_first_point;
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (RADIALDECLINE) {
// prevAz = next_ours* self [iWhichSelf]
// oppAz = next_0 * 0th value [iWhichNext]
// nextAz = 0th value [iWhichNext]
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] +=
0.5*next_ours*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] +=
0.5*(next_0*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y)
+
(info.pos.y-opppos.y)*edge_normal.x
+ (opppos.x-info.pos.x)*edge_normal.y
) / area_quadrilateral;
}
}
if (DIRICHLET) {
iWhich = p_indic[izTri[i]]; // frill!
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y) / area_quadrilateral;
}
iWhich = p_indic[izTri[inext]];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
iWhich = p_indic[iVertex + BEGINNING_OF_CENTRAL];
if (iWhich > 0) {
d_eps_by_dbeta_j[iWhich - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
};
};
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
f64 gamma = pgamma[iVertex + BEGINNING_OF_CENTRAL];
for (iWhich = 0; iWhich < SQUASH_POINTS; iWhich++)
d_eps_by_dbeta_j[iWhich] *= -h_use*gamma/AreaMinor;
// d eps_i / d x_j = [i==j]*1 - h gamma d[Lap here]/dx_j
if (p_indic[iVertex + BEGINNING_OF_CENTRAL] > 0) d_eps_by_dbeta_j[p_indic[iVertex + BEGINNING_OF_CENTRAL] - 1] += 1.0;
// p_indic[iVertex + BEGINNING_OF_CENTRAL]-1 is the number of its volley. Stupid system.
// For simplicity let's say we save off into global memory.
memcpy(&(p_Jacobianesque_list[(iVertex + BEGINNING_OF_CENTRAL)*SQUASH_POINTS]),
d_eps_by_dbeta_j, sizeof(f64)*SQUASH_POINTS); // d eps_i / dbeta_j
// if (iVertex + BEGINNING_OF_CENTRAL == MyMaxIndex) {
// for (j = 0; j < SQUASH_POINTS; j++)
// printf("%d : coeff %d : %1.9E \n", iVertex + BEGINNING_OF_CENTRAL, j,
// d_eps_by_dbeta_j[j]);
// }
}; // was thread in the first half of the block
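// From here every thread handles one 6-neighbour minor cell (index iMinor), using
// izNeighMinor / p_szPBCtriminor instead of the vertex's izTri / p_szPBCtri_vertex lists.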
memset(d_eps_by_dbeta_j, 0, sizeof(f64)*SQUASH_POINTS);
f64 prevfactor, oppfactor, nextfactor;
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) return;
iWhichSelf = p_indic[iMinor];
// p_LapAz[iMinor] = 0.0;
// } else {
f64 AreaMinor = 0.0;
short inext, i = 0, iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if ((prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill under radial decline:
//prevAz = ourAz*(info.pos.modulus() / prevpos.modulus());
// do this by resetting iWhichPrev and a factor. !!
iWhichPrev = iWhichSelf;
prevfactor = (info.pos.modulus() / prevpos.modulus());
} else {
iWhichPrev = p_indic[izNeighMinor[iprev]];
prevfactor = 1.0;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if ((opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill
//oppAz = ourAz*(info.pos.modulus() / opppos.modulus());
iWhichOpp = iWhichSelf;
oppfactor = (info.pos.modulus() / opppos.modulus());
} else {
iWhichOpp = p_indic[izNeighMinor[i]];
oppfactor = 1.0;
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
} else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if ((nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
&& (RADIALDECLINE))
{
// outer frill
//nextAz = ourAz*(info.pos.modulus() / nextpos.modulus());
iWhichNext = iWhichSelf;
nextfactor = (info.pos.modulus() / nextpos.modulus());
} else {
iWhichNext = p_indic[izNeighMinor[inext]];
nextfactor = 1.0;
};
// ______________________________________________________-
//f64_vec2 integ_grad_Az;
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
f64_vec2 edge_normal;
edge_normal.x = THIRD*(nextpos.y - prevpos.y);
edge_normal.y = THIRD*(prevpos.x - nextpos.x);
if (((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
|| (DIRICHLET) || (RADIALDECLINE))
&&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
{
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
if (iWhichPrev > 0) {
d_eps_by_dbeta_j[iWhichPrev - 1] += prevfactor*0.5*((opppos.y - info.pos.y)*edge_normal.x
- (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral; // prevfactor/oppfactor/nextfactor = 1 except where the neighbour is an outer frill under RADIALDECLINE
}
if (iWhichOpp > 0) {
d_eps_by_dbeta_j[iWhichOpp - 1] += oppfactor*0.5*((nextpos.y - prevpos.y)*edge_normal.x
- (nextpos.x - prevpos.x)*edge_normal.y) / area_quadrilateral;
}
if (iWhichNext > 0) {
d_eps_by_dbeta_j[iWhichNext - 1] += nextfactor*0.5*((info.pos.y - opppos.y)*edge_normal.x
+ (opppos.x - info.pos.x)*edge_normal.y) / area_quadrilateral;
};
if (iWhichSelf > 0) {
d_eps_by_dbeta_j[iWhichSelf - 1] += 0.5*((prevpos.y - nextpos.y)*edge_normal.x
- (prevpos.x - nextpos.x)*edge_normal.y) / area_quadrilateral;
}
};
AreaMinor += SIXTH*((prevpos.x + info.pos.x + opppos.x) +
(nextpos.x + info.pos.x + opppos.x))*edge_normal.x;
prevpos = opppos;
opppos = nextpos;
iWhichPrev = iWhichOpp;
prevfactor = oppfactor;
iWhichOpp = iWhichNext;
oppfactor = nextfactor;
};
f64 gamma = pgamma[iMinor];
for (iWhich = 0; iWhich < SQUASH_POINTS; iWhich++)
d_eps_by_dbeta_j[iWhich] *= -h_use*gamma / AreaMinor;
// d eps_i / d x_j = [i==j]*1 - h gamma d[Lap here]/dx_j
//if (p_indic[iMinor] > SQUASH_POINTS) {
// printf("ERROR %d p_indic[iMinor] %d \n", iMinor, p_indic[iMinor]);
// // $$$$$$$$$$$
// // DEBUG
// // $$$$$$$$$$$
//} else {
if (iWhichSelf > 0) d_eps_by_dbeta_j[iWhichSelf - 1] += 1.0;
//}
// p_indic[iMinor]-1 is the number of its volley. Stupid system.
// For simplicity let's say we save off into global memory.
// if (p_indic[iMinor]>0) printf("indic %d found at %d; deps = %1.9E\n", p_indic[iMinor], iMinor,
// d_eps_by_dbeta_j[p_indic[iMinor] - 1]);
//
memcpy(&(p_Jacobianesque_list[iMinor*SQUASH_POINTS]),d_eps_by_dbeta_j,
sizeof(f64)*SQUASH_POINTS); // d eps_i / dbeta_j
// if (iMinor == MyMaxIndex) {
// for (j = 0; j < SQUASH_POINTS; j++)
// printf("%d : coeff %d : %1.9E \n", iMinor, j, d_eps_by_dbeta_j[j]);
// };
//};
}
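// Hedged host-side sketch (not part of the original source): one plausible way to launch
// kernelComputeJacobianValues over the minor mesh. The grid size numTilesMinor, the null
// stream and the wrapper name are illustrative assumptions; the project's real driver code
// lives elsewhere.
void CallComputeJacobianValues_sketch(
	long numTilesMinor,                  // assumed: one block per tile of threadsPerTileMinor minors
	structural * p_info,
	f64 * p_gamma,
	f64 const h_use,
	long * p_indic,
	long * p_izTri,
	long * p_izNeighMinor,
	char * p_szPBCtri_vertex,
	char * p_szPBCtriminor,
	f64 * p_Jacobianesque_list)          // device output: SQUASH_POINTS coefficients per minor cell
{
	hipLaunchKernelGGL(kernelComputeJacobianValues,
		dim3((unsigned int)numTilesMinor), dim3(threadsPerTileMinor), 0, 0,
		p_info, p_gamma, h_use, p_indic, p_izTri, p_izNeighMinor,
		p_szPBCtri_vertex, p_szPBCtriminor, p_Jacobianesque_list);
	hipDeviceSynchronize(); // make the Jacobian rows visible before the host-side squash solve
}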
__global__ void kernelGetLapCoeffs_and_min(
structural * __restrict__ p_info,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_min_array,
long * __restrict__ p_min_index)
{
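// For every minor cell this kernel computes the self-coefficient of the Laplacian stencil,
// d Lap(Az)_i / d Az_i, writes it to p_LapCoeffSelf, and then block-reduces to find the
// minimum (most negative) coefficient in the tile and the cell index that attains it,
// storing one candidate per block in p_min_array / p_min_index.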
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ char shared_flag[threadsPerTileMinor];
__shared__ f64 mincoeffself[threadsPerTileMinor];
__shared__ long iMin[threadsPerTileMinor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful if threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
structural info = p_info[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_flag[threadIdx.x] = info.flag;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
// Better if we use the same shared memory to do both tris and verts.
// Idea: have the kernel called for # minor threads, each loading 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
mincoeffself[threadIdx.x] = 0.0;
iMin[threadIdx.x] = -1;
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + MAXNEIGH*iVertex, sizeof(long)*MAXNEIGH);
memcpy(szPBC, p_szPBCtri_vertex + MAXNEIGH*iVertex, sizeof(char)*MAXNEIGH);
// Is this the best way? Better than going looking for periodic data on each tri.
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opppos = p_info[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
f64_vec2 store_first_point = endpt0;
short iend = tri_len;
f64_vec2 projendpt0;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(1.0)*(info.pos.y - nextpos.y)
+ (1.0)*(prevpos.y - info.pos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(info.pos.x - nextpos.x)
+ (1.0)*(prevpos.x - info.pos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN) {
// printf("%d contrib %1.14E %d \nourpos %1.14E %1.14E opppos %1.14E %1.14E \n"
// "prevpos nextpos %1.14E %1.14E %1.14E %1.14E\n"
// "szPBC[i] %d area_quadrilateral %1.14E \n",
// iVertex + BEGINNING_OF_CENTRAL,
// integ_grad_Az.dot(edge_normal) / area_quadrilateral,
// izTri[i],
// info.pos.x,info.pos.y,opppos.x,opppos.y,
// prevpos.x,prevpos.y,nextpos.x,nextpos.y,
// (int)szPBC[i],area_quadrilateral);
// }
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
++iprev;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
f64_vec2 integ_grad_Az;
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
f64 facc = 0.0;
if (RADIALDECLINE) {
info.pos.project_to_radius(nextpos, info.pos.modulus() + (FRILL_CENTROID_OUTER_RADIUS_d - info.pos.modulus())*1.16);
endpt1 = THIRD*(opppos + info.pos + nextpos);
facc = (info.pos.modulus() / nextpos.modulus());
}
if (!RADIALDECLINE) {
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
+ (1.0)*(prevpos.y - nextpos.y)
+ facc*(info.pos.y-opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
+(1.0)*(prevpos.x - nextpos.x)
+ facc*(info.pos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD*(opppos + info.pos + nextpos);
if (!RADIALDECLINE) {
// map radially inwards so that radius is halfway out to the zero arc.
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(1.0)*(prevpos.y - nextpos.y)
+ facc*(nextpos.y-prevpos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(prevpos.x - nextpos.x)
+ facc*(nextpos.x-prevpos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
endpt1 = store_first_point;
nextpos = p_info[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
integ_grad_Az.x = 0.5*(
(1.0)*(prevpos.y - nextpos.y)
+ facc*(opppos.y - info.pos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0)*(prevpos.x - nextpos.x)
+ facc*(opppos.x - info.pos.x)
);
area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (DIRICHLET || RADIALDECLINE) Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az_contrib_from_own_Az / AreaMinor;
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL];
iMin[threadIdx.x] = iVertex + BEGINNING_OF_CENTRAL;
// All vertices can count for this.
}; // was thread in the first half of the block
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Look at simulation.cpp
// Treatment of FRILLS :
p_LapCoeffSelf[iMinor] = -1.0;
// LapCoefftri[iMinor][3] = 1.0; // neighbour 0
}
else {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
f64 prevfac = 0.0, nextfac = 0.0, oppfac = 0.0;
short iprev = 5; short inext, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
if (prevpos.dot(prevpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
prevfac = (info.pos.modulus() / prevpos.modulus());
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
if (opppos.dot(opppos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
oppfac = (info.pos.modulus() / opppos.modulus());
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
if (nextpos.dot(nextpos) > 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d)
{
// outer frill
if (RADIALDECLINE)
nextfac = (info.pos.modulus() / nextpos.modulus());
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
// (simple unit-weight stencil, immediately superseded by the frill-weighted form below)
//integ_grad_Az.x = 0.5*(prevpos.y - nextpos.y);
//integ_grad_Az.y = -0.5*(prevpos.x - nextpos.x);
integ_grad_Az.x = 0.5*(
(1.0 + nextfac)*(info.pos.y - nextpos.y)
+ (prevfac + 1.0)*(prevpos.y - info.pos.y)
+ (oppfac + prevfac)*(opppos.y - prevpos.y)
+ (nextfac + oppfac)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(1.0 + nextfac)*(info.pos.x - nextpos.x)
+ (prevfac + 1.0)*(prevpos.x - info.pos.x)
+ (oppfac + prevfac)*(opppos.x - prevpos.x)
+ (nextfac + oppfac)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
if (
((opppos.dot(opppos) < 0.99999*0.99999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) || (DIRICHLET)
|| (RADIALDECLINE)) &&
(opppos.dot(opppos) > 1.00001*1.00001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d)
)
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az_contrib_from_own_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
prevfac = oppfac;
oppfac = nextfac;
iprev = i;
// There is an even quicker way which is to rotate pointers. No memcpy needed.
};
p_LapCoeffSelf[iMinor] = Our_integral_Lap_Az_contrib_from_own_Az / AreaMinor;
if (p_LapCoeffSelf[iMinor] < mincoeffself[threadIdx.x])
{
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iMinor];
iMin[threadIdx.x] = iMinor;
};
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + k])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + k];
iMin[threadIdx.x] = iMin[threadIdx.x + k];
}
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
if (mincoeffself[threadIdx.x] > mincoeffself[s - 1])
{
mincoeffself[threadIdx.x] = mincoeffself[s - 1];
iMin[threadIdx.x] = iMin[s - 1];
}
};
// When s is odd (e.g. s == 81, k == 40) the pairwise fold above only reaches index 2k-1 = 79,
// so the leftover element [s-1] (= [80]) is folded into [k-1] (= [39]) here.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_min_array[blockIdx.x] = mincoeffself[threadIdx.x];
p_min_index[blockIdx.x] = iMin[threadIdx.x];
}
}
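// Hedged host-side sketch (not part of the original source): kernelGetLapCoeffs_and_min leaves
// one candidate per block in p_min_array / p_min_index, so a final pass is still needed to pick
// the global minimum. The helper below copies those per-block results back and scans them on the
// host; numTilesMinor and the two output parameters are illustrative assumptions.
void FindGlobalMinLapCoeffSelf_sketch(
	long numTilesMinor,        // assumed: number of blocks the kernel was launched with
	f64 * p_min_array,         // device: per-block minimum LapCoeffSelf
	long * p_min_index,        // device: per-block index of that minimum
	f64 * pMinCoeff_host,      // host out: global minimum coefficient
	long * piMinIndex_host)    // host out: minor-cell index attaining it
{
	f64 * minvals = new f64[numTilesMinor];
	long * mininds = new long[numTilesMinor];
	hipMemcpy(minvals, p_min_array, sizeof(f64)*numTilesMinor, hipMemcpyDeviceToHost);
	hipMemcpy(mininds, p_min_index, sizeof(long)*numTilesMinor, hipMemcpyDeviceToHost);
	f64 best = minvals[0];
	long ibest = mininds[0];
	for (long iTile = 1; iTile < numTilesMinor; iTile++) {
		if (minvals[iTile] < best) {
			best = minvals[iTile];
			ibest = mininds[iTile];
		};
	};
	*pMinCoeff_host = best;
	*piMinIndex_host = ibest;
	delete[] minvals;
	delete[] mininds;
}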
/*
__global__ void kernelGetLapCoeffs_and_min_DEBUG(
structural * __restrict__ p_info,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtri_vertex,
char * __restrict__ p_szPBCtriminor, //B
f64 * __restrict__ p_LapCoeffSelf,
f64 * __restrict__ p_min_array,
long * __restrict__ p_min_index) //B
{
Note many changes since this was used; delete and go again.
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ char shared_flag[threadsPerTileMinor];
__shared__ f64 mincoeffself[threadsPerTileMinor];
__shared__ long iMin[threadsPerTileMinor]; // B
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
// code A
structural info = p_info[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_flag[threadIdx.x] = info.flag;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
// Better if we use same share to do both tris and verts
// Idea: let's make it called for # minor threads, each loads 1 shared value,
// and only half the threads run first for the vertex part. That is a pretty good idea.
mincoeffself[threadIdx.x] = 0.0;
iMin[threadIdx.x] = -1;
if (threadIdx.x < threadsPerTileMajor) {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iVertex + BEGINNING_OF_CENTRAL];
iMin[threadIdx.x] = iVertex + BEGINNING_OF_CENTRAL; // B
// All vertices can count for this.
}; // was thread in the first half of the block
// 2nd commenting
info = p_info[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Look at simulation.cpp
// Treatment of FRILLS :
p_LapCoeffSelf[iMinor] = -1.0;
// LapCoefftri[iMinor][3] = 1.0; // neighbour 0
}
else {
f64 Our_integral_Lap_Az_contrib_from_own_Az = 0.0;
f64 AreaMinor = 0.0;
p_LapCoeffSelf[iMinor] = 0.0;
if (p_LapCoeffSelf[iMinor] < mincoeffself[threadIdx.x])
{
mincoeffself[threadIdx.x] = p_LapCoeffSelf[iMinor];
iMin[threadIdx.x] = iMinor;
};
};
// still fails without the following.
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + k])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + k];
iMin[threadIdx.x] = iMin[threadIdx.x + k];
}
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
if (mincoeffself[threadIdx.x] > mincoeffself[threadIdx.x + s - 1])
{
mincoeffself[threadIdx.x] = mincoeffself[threadIdx.x + s - 1];
iMin[threadIdx.x] = iMin[threadIdx.x + s - 1];
}
};
// In case k == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_min_array[blockIdx.x] = mincoeffself[threadIdx.x];
p_min_index[blockIdx.x] = iMin[threadIdx.x];
}
}*/
// Correct disposition of routines:
// --- union of T and [v + v_overall] -- uses n_shards --> pressure, momflux, grad Te
// --- union of T and [v + v_overall] -- uses n_n shards --> neutral pressure, neutral momflux
// --- Az,Azdot + v_overall -- runs for whole domain ---> Lap A, curl A, grad A, grad Adot, ROCAz, ROCAzdot
// ^^ base off of GetLap_minor.
// Worst case number of vars:
// (4+2)*1.5+6.5 <-- because we use v_vertex. + 3 for positions.
// What can we stick in L1? n_cent we could.
// We should be aiming at a ratio of 3:1 from shared:L1, if registers are small.
// For tris we are using n_shards from shared points.
// And it is for tris that we require vertex data v to be present.
// Idea: vertex code determines array of 12 relevant n and sticks them into shared.
// Only saved us 1 var. 9 + 6 + 3 = 18.
// Still there is premature optimization here -- none of this happens OFTEN.
// ever called?
/*
__global__ void kernelCreate_pressure_gradT_and_gradA_LapA_CurlA_minor(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_minor,
AAdot * __restrict__ p_AAdot,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just so we can handle insulator
f64_vec2 * __restrict__ p_GradTe,
f64_vec2 * __restrict__ p_GradAz,
f64 * __restrict__ p_LapAz,
f64 * __restrict__ ROCAzduetoAdvection,
f64 * __restrict__ ROCAzdotduetoAdvection,
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_B,
f64 * __restrict__ p_AreaMinor
)
{
// Getting this down to 8 vars we could have 512 threads (12 vars/thread total with vertex vars)
// Down to 6 -> 9 total -> 600+ threads
// Worry later.
__shared__ T2 shared_T[threadsPerTileMinor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64 shared_Azdot[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
// Problem: we only have room for 1 at a time. Have to run again with n_n. Too bad.
// Live with it and push through.
// This applies to both vertices and triangles. And putting in L1 unshared is not better.
// We can imagine doing it some other way but using shards is true to the design that was created on CPU.
// Of course this means we'd be better off putting
// We could also argue that with shards for n_ion in memory we are better off doing an overwrite and doing stuff for nv also.
// never mind that for now
__shared__ T2 shared_T_verts[threadsPerTileMajor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64 shared_Azdot_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// There is a good argument for splitting out A,Adot to a separate routine.
// That way we could have 10.5 => 585 ie 576 = 288*2 threads.
// Here we got (2+1+1+2)*1.5 = 9 , + 6.5 = 15.5 -> 384 minor threads max.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
{
AAdot temp = p_AAdot[iMinor];
shared_Az[threadIdx.x] = temp.Az;
shared_Azdot[threadIdx.x] = temp.Azdot;
}
{
T3 T_ = p_T_minor[iMinor];
shared_T[threadIdx.x].Te = T_.Te;
shared_T[threadIdx.x].Ti = T_.Ti;
}
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
AAdot temp = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
shared_Az_verts[threadIdx.x] = temp.Az;
shared_Azdot_verts[threadIdx.x] = temp.Azdot;
T3 T_ = p_T_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_T_verts[threadIdx.x].Te = T_.Te;
shared_T_verts[threadIdx.x].Ti = T_.Ti; // MOVED THIS OUT OF the following branch to see it match CPU
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
}
else {
// save several bus trips;
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
//shared_T_verts[threadIdx.x].Te = 0.0;
//shared_T_verts[threadIdx.x].Ti = 0.0;
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
f64 ourAzdot, oppAzdot, prevAzdot, nextAzdot;
f64_vec2 opppos, prevpos, nextpos;
T2 oppT, prevT, nextT;
//nvals our_n, opp_n, prev_n, next_n;
f64_vec2 Our_integral_curl_Az, Our_integral_grad_Az, Our_integral_grad_Azdot, Our_integral_grad_Te;
f64 Our_integral_Lap_Az;
if (threadIdx.x < threadsPerTileMajor) {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Azdot.x = 0.0;
Our_integral_grad_Azdot.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64_vec3 MAR_ion, MAR_elec;
memcpy(&(MAR_ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(MAR_elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourAz = shared_Az_verts[threadIdx.x];
ourAzdot = shared_Azdot_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_T[izTri[iprev] - StartMinor];
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevAzdot = shared_Azdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_T[izTri[i] - StartMinor];
oppAz = shared_Az[izTri[i] - StartMinor];
oppAzdot = shared_Azdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
T3 opp_T = p_T_minor[izTri[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_T[izTri[inext] - StartMinor];
nextAz = shared_Az[izTri[inext] - StartMinor];
nextAzdot = shared_Azdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
f64_vec2 integ_grad_Az;
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
T2 T0, T1;
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// So this is pretty stupid ---
// If shardmodel went for flat then we have decided that there is no pressure gradient affecting v here.
// Mind you we didn't expect it to be flat nearly as often as it is flat.
// Think carefully about what pressure we want to feel.
// It makes a kind of sense if you have a cliff of density then you feel it in the triangle in between.
// But that won't push points apart. It just sends stuff through the wall.
// It's a shame we can't just use actual n values to infer gradient over a region.
// It probably creates wobbles in v as well, because if we move fast particles at edge then we leave
// Behind a still-lower v in the vertex-centered minor.
// The scheme is kind of skewiffifying.
// Assume neighs 0,1 are relevant to border with tri 0 minor
// To get integral grad we add the averages along the edges times edge_normals
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// if (iVertex == VERT1) {
// printf("GPUpressure %d MAR_ion.x %1.12E contrib.x %1.12E n0 %1.12E Ti0 %1.9E n1 %1.9E Ti1 %1.9E edge_normal.x %1.12E \n",
// CHOSEN, MAR_ion.x,
// -0.5*(n0*T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.x,
// n0, T0.Ti, n1, T1.Ti, edge_normal.x);
// }
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN)
// printf("GPU %d : GradTe contrib %1.14E %1.14E Te %1.14E opp %1.14E next %1.14E prev %1.14E edge_normal %1.14E %1.14E\n", iVertex + BEGINNING_OF_CENTRAL,
// 0.5*(T0.Te + T1.Te) * edge_normal.x,
//0.5*(T0.Te + T1.Te) * edge_normal.y,
// shared_T_verts[threadIdx.x].Te, oppT.Te, nextT.Te, prevT.Te,
//edge_normal.x, edge_normal.y);
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az += Az_edge * (endpt1 - endpt0);
// Missing a factor of 3 possibly?
// ??????????????????????????????????????????????????????????????
// if (Az_edge != Az_edge)
// printf("GPU vert %d Az_edge %1.14E oppAz %1.14E endpt1 %1.14E %1.14E Integ_curl %1.14E %1.14E\n",
// iVertex, Az_edge, oppAz, endpt1.x,endpt1.y, Our_integral_curl_Az.x, Our_integral_curl_Az.y
// );
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
oppT = nextT;
}; // next i
//if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// // This will never happen because we just asked info.flag == DOMAIN_VERTEX !!
// // Now add on the final sides to give area:
// // 3 4
// // 2 1 0
// // endpt0=endpt1 is now the point north of edge facing 2 anyway.
// f64_vec2 projendpt1;
// if (info.flag == OUTERMOST) {
// endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
// }
// else {
// endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
// };
// edge_normal.x = projendpt1.y - endpt1.y;
// edge_normal.y = endpt1.x - projendpt1.x;
// AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
// edge_normal.x = projendpt0.y - projendpt1.y;
// edge_normal.y = projendpt1.x - projendpt0.x;
// AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// // line between out-projected points
//};
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor;
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Te / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN) printf("Our_integral_grad_Te.x %1.14E AreaMinor %1.14E\n\n",
// Our_integral_grad_Te.x, AreaMinor);
// wow :
f64_vec2 overall_v_ours = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
ROCAzduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = overall_v_ours.dot(Our_integral_grad_Az / AreaMinor);
ROCAzdotduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = overall_v_ours.dot(Our_integral_grad_Azdot / AreaMinor);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &MAR_elec, sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do Az, Azdot only:
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevAzdot = shared_Azdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izTri[iprev]].pos;
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppAz = shared_Az[izTri[i] - StartMinor];
oppAzdot = shared_Azdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1;
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
}
else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
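// (For INNERMOST / OUTERMOST the fan of triangles does not close the cell, so it is closed off by
// projecting the corner radially to the frill-centroid radius; the extra edge feeds AreaMinor through
// the same xbar * (delta y) rule, i.e. A = contour integral of x dy. The matching edges at the other
// end are added after the loop, below.)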
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextAz = shared_Az[izTri[inext] - StartMinor];
nextAzdot = shared_Azdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64_vec2 integ_grad_Az;
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// pData[iDestTri].B -= Az_edge * (endpt1 - endpt0); // MUST DIVIDE BY AREA
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// To get integral grad we add the averages along the edges times edge_normals
// f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
// f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
// Our_integral_grad_Azdot += Azdot_edge * edge_normal;
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
}; // next i
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// Now add on the final sides to give area:
// 3 4
// 2 1 0
// endpt0=endpt1 is now the point north of edge facing 2 anyway.
f64_vec2 projendpt1;
if (info.flag == OUTERMOST) {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_OUTER_RADIUS_d);
}
else {
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
};
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// line between out-projected points
};
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor; // 0,0
p_LapAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_Lap_Az / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT); // 0,0, BZ
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
ROCAzduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
ROCAzdotduetoAdvection[iVertex + BEGINNING_OF_CENTRAL] = 0.0;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Vector2(0.0, 0.0);
}; // // was it domain vertex or Az-only
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// T2 prevT, nextT, oppT;
//f64 prevAz, nextAz, oppAz, ourAz;
//f64 prevAzdot, nextAzdot, oppAzdot, ourAzdot;
f64_vec3 MAR_ion, MAR_elec;
// This is not a clever way of doing it. Want something more careful.
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
if ((izNeighMinor[3] >= StartMinor) && (izNeighMinor[3] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[3] - StartMinor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[3]];
oppAz = temp.Az;
};
p_LapAz[iMinor] = oppAz - ourAz;
ROCAzduetoAdvection[iMinor] = 0.0;
ROCAzdotduetoAdvection[iMinor] = 0.0;
p_GradAz[iMinor] = Vector2(0.0, 0.0);
memset(&(p_B[iMinor]), 0, sizeof(f64_vec3));
p_GradTe[iMinor] = Vector2(0.0, 0.0);
p_AreaMinor[iMinor] = 1.0e-12;
memset(&(p_MAR_ion[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_MAR_elec[iMinor]), 0, sizeof(f64_vec3));
}
else {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Azdot.x = 0.0;
Our_integral_grad_Azdot.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64 AreaMinor_for_A = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(&MAR_ion, p_MAR_ion + iMinor, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iMinor, sizeof(f64_vec3));
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevT = shared_T[izNeighMinor[iprev] - StartMinor];
prevAzdot = shared_Azdot[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevAzdot = shared_Azdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_T_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 prev_T = p_T_minor[izNeighMinor[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppT = shared_T[izNeighMinor[i] - StartMinor];
oppAzdot = shared_Azdot[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppAzdot = shared_Azdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_T_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
// indexminor sequence:
// 0 = corner 0
// 1 = neighbour 2
// 2 = corner 1
// 3 = neighbour 0
// 4 = corner 2
// 5 = neighbour 1
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
// *** Pathological case: OUTERMOST vertex, where neigh_len is not correct to take as == tri_len. ***
// [0] is on our clockwise side rel to [1]. That means it is anticlockwise for the vertex.
// That means we interpolate with the value from next tri around.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
//This matches a diagram:
//
// 2---(4)----(3)---1 = corner 1 = indexminor 2: (2,3)
// \ / \ /
// \/ \ /
// (5\ (2/ indexminor 1 = neighbour 2: (1,2)
// \ /
// \0)--(1/
// \ _/
// 0 = corner 0 = indexminor0
};
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextT = shared_T[izNeighMinor[inext] - StartMinor];
nextAzdot = shared_Azdot[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextAzdot = shared_Azdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_T_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
T3 next_T = p_T_minor[izNeighMinor[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0); // looks anticlockwise
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
//if ((i % 2 == 0) || ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
// We have to not muck around with prevpos because here it's being used for A.
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor_for_A += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
T3 T0, T1; // waste of registers
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti); // assumes point is at simple average of tri and vert centres.
n0 = n_array[i];
n1 = n_array[inext]; // !
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// typical edge
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// endpt0 = THIRD * (prevpos + info.pos + opppos);
// endpt1 = THIRD * (nextpos + info.pos + opppos);
// edge_normal.x = endpt1.y - endpt0.y;
// edge_normal.y = endpt0.x - endpt1.x;
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER-r2) / (r1-r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
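// Worked form of the interpolation just above: writing r_lo, r_hi for the radii of the two endpoints
// and R = DEVICE_RADIUS_INSULATOR_OUTER, radius is treated as (nearly) linear along the segment,
// so the crossing sits at parameter t = (R - r_lo)/(r_hi - r_lo) and
//    point = endpt_lo + t*(endpt_hi - endpt_lo).
// Illustrative numbers only: r_lo = 1.0, r_hi = 2.0, R = 1.4 gives t = 0.4, i.e. 40% of the way up.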
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Set nT on the edge: here just the plain average of the two nT (an alternative would be to weight by
// distance to own centre -- recall the periodic rotation when measuring distance to own centre).
f64 nTi_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti + p_n_minor[izNeighMinor[i]].n*oppT.Ti);
f64 nTe_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Te + p_n_minor[izNeighMinor[i]].n*oppT.Te);
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(shared_T[threadIdx.x].Te + oppT.Te) * edge_normal;
} else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 nTi_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti;
f64 nTe_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Te;
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += shared_T[threadIdx.x].Te * edge_normal;
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
};
} else {
// Typical tri.
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
};
if (TESTTRI) {
printf("GPU : %d : contribs MAR_ion.y %1.11E MAR_elec.y %1.11E \n"
"n0 %1.10E n1 %1.10E Ti0 %1.10E Ti1 %1.10E edgenormal.y %1.10E\n",
CHOSEN,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.y,
n0, n1, T0.Ti, T1.Ti, edge_normal.y);
}
// Having a real problem with AreaMinor.
// It is fine for how it is used here, but the one we should record, for creating N and hence pressure effects,
// is the one that comes from rolling points upwards.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x; // Area to save.
// Note a way that FP accuracy gets eroded: we already take a difference of two close quantities to get edge_normal.
// Can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
n0 = n1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
oppT = nextT;
};
/*if (info.flag == CROSSING_INS) {
// In this case set v_r = 0 and set a_TP_r = 0 and dv/dt _r = 0 in general
//f64_vec2 rhat = info.pos / info.pos.modulus();
MAR_ion -= Make3(
(MAR_ion.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
MAR_elec -= Make3(
(MAR_elec.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
no
// and we looked at insulator values for T so Grad Te was meaningless:
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
// I think we do need to make v_r = 0. It's common sense that it IS 0
// since we site our v_r estimate on the insulator. Since it is sited there,
// it is used for traffic into the insulator by n,nT unless we pick out
// insulator-abutting cells on purpose.
// However, we then should make an energy correction -- at least if
// momentum is coming into this minor cell and being destroyed.
// Doesn't quite work like that. We do not destroy, we just do not store a value for the mom in the domain part of cell.
};*/
/*
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor_for_A;
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor_for_A;
p_GradTe[iMinor] = Our_integral_grad_Te / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor_for_A, BZ_CONSTANT);
p_AreaMinor[iMinor] = AreaMinor;
// wow :
f64_vec2 overall_v_ours = p_v_overall_minor[iMinor];
ROCAzduetoAdvection[iMinor] = overall_v_ours.dot(Our_integral_grad_Az / AreaMinor);
ROCAzdotduetoAdvection[iMinor] = overall_v_ours.dot(Our_integral_grad_Azdot / AreaMinor);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iMinor, &(MAR_ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iMinor, &(MAR_elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevAzdot = shared_Azdot[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevAzdot = shared_Azdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevAzdot = temp.Azdot;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppAzdot = shared_Azdot[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppAzdot = shared_Azdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
oppAzdot = temp.Azdot;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextAzdot = shared_Azdot[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextAzdot = shared_Azdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextAzdot = temp.Azdot;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0); // looks anticlockwise
integ_grad_Az.x = 0.5*(
(ourAz + nextAz)*(info.pos.y - nextpos.y)
+ (prevAz + ourAz)*(prevpos.y - info.pos.y)
+ (oppAz + prevAz)*(opppos.y - prevpos.y)
+ (nextAz + oppAz)*(nextpos.y - opppos.y)
);
integ_grad_Az.y = -0.5*( // notice minus
(ourAz + nextAz)*(info.pos.x - nextpos.x)
+ (prevAz + ourAz)*(prevpos.x - info.pos.x)
+ (oppAz + prevAz)*(opppos.x - prevpos.x)
+ (nextAz + oppAz)*(nextpos.x - opppos.x)
);
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
//f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
// if ((i % 2 == 0) || // vertex neigh
// ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
(opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
f64 Azdot_edge = SIXTH * (2.0*ourAzdot + 2.0*oppAzdot + prevAzdot + nextAzdot);
Our_integral_grad_Azdot += Azdot_edge * edge_normal;
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
// minus
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
prevAzdot = oppAzdot;
opppos = nextpos;
oppAz = nextAz;
oppAzdot = nextAzdot;
};
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor;
p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
p_AreaMinor[iMinor] = AreaMinor;
ROCAzduetoAdvection[iMinor] = 0.0;
ROCAzdotduetoAdvection[iMinor] = 0.0;
} // non-domain tri
}; // was it FRILL
// Okay. While we have n_shards in memory we could proceed to overwrite with vxy.
// But get running first before using union and checking same.
}*/
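// ______________________________________________________
// A minimal standalone sketch (an assumption: it is not called anywhere, and the name
// GetGradOnQuadrilateral_sketch is illustrative rather than part of the existing code) of the
// Green's-theorem gradient estimate that the routines in this file evaluate inline for Az:
// integrate Az along the contour ours -> prev -> opp -> next with the trapezium rule and
// divide by the shoelace area of the same contour. Kept here only to document the formula
// in one place; the kernels retain the inline form.
__device__ __forceinline__ f64_vec2 GetGradOnQuadrilateral_sketch(
	f64 ourAz, f64 prevAz, f64 oppAz, f64 nextAz,
	f64_vec2 const & ourpos, f64_vec2 const & prevpos,
	f64_vec2 const & opppos, f64_vec2 const & nextpos)
{
	f64_vec2 integ_grad;
	// contour integral of Az dy, edge by edge (trapezium rule):
	integ_grad.x = 0.5*(
		(ourAz + nextAz)*(ourpos.y - nextpos.y)
		+ (prevAz + ourAz)*(prevpos.y - ourpos.y)
		+ (oppAz + prevAz)*(opppos.y - prevpos.y)
		+ (nextAz + oppAz)*(nextpos.y - opppos.y));
	// minus the contour integral of Az dx:
	integ_grad.y = -0.5*(
		(ourAz + nextAz)*(ourpos.x - nextpos.x)
		+ (prevAz + ourAz)*(prevpos.x - ourpos.x)
		+ (oppAz + prevAz)*(opppos.x - prevpos.x)
		+ (nextAz + oppAz)*(nextpos.x - opppos.x));
	// shoelace area of the same contour; the traversal orientation cancels in the ratio:
	f64 area_quadrilateral = 0.5*(
		(ourpos.x + nextpos.x)*(ourpos.y - nextpos.y)
		+ (prevpos.x + ourpos.x)*(prevpos.y - ourpos.y)
		+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
		+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
	return integ_grad / area_quadrilateral;
}
// ______________________________________________________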
__global__ void kernelCreate_pressure_gradT_and_gradA_CurlA_minor_noadvect(
structural * __restrict__ p_info_minor,
T3 * __restrict__ p_T_minor,
AAdot * __restrict__ p_AAdot,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just so we can handle insulator
bool * __restrict__ bz_pressureflag,
f64_vec2 * __restrict__ p_GradTe,
f64_vec2 * __restrict__ p_GradAz,
f64_vec3 * __restrict__ p_B
)
{
// Getting this down to 8 vars we could have 512 threads (12 vars/thread total with vertex vars)
__shared__ T2 shared_T[threadsPerTileMinor];
__shared__ f64 shared_Az[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor]; // 5 + 13/2 + below, 2.5 -> 14 doubles.
// Tile size is 256 though so 14 doubles will allow 1x to run. We have extra shared space if we need it.
// We could also argue that with shards for n_ion in memory we are better off doing an overwrite and doing stuff for nv also.
// never mind that for now. <-- ?
// 2019: Hang on. Why did I use shards? It's quite a good idea. If we are flat then the pressure lands more on the triangles
// at the interface. Makes a consistent set of values of n to pave the space.
__shared__ T2 shared_T_verts[threadsPerTileMajor];
__shared__ f64 shared_Az_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// There is a good argument for splitting out A,Adot to a separate routine.
// That way we could have 10.5 => 585 ie 576 = 288*2 threads.
// Here we got (2+1+1+2)*1.5 = 9 , + 6.5 = 15.5 -> 384 minor threads max.
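// Worked version of that budget (illustrative, assuming the usual 48 kB of shared memory per block):
// per minor thread here, T2 (2 doubles) + Az (1) + pos (2) = 5, plus the vertex arrays at half density,
// (2 + 1 + 2)*0.5 = 2.5, plus ShardModel ~13/2 = 6.5, giving ~14 doubles = 112 bytes per minor thread;
// 49152 / 112 ~ 438, so the 256-thread tile fits with room to spare. The older variant that also kept
// Azdot came to ~15.5 doubles = 124 bytes, 49152 / 124 ~ 396, hence the 384-thread ceiling quoted above.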
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
{
AAdot temp = p_AAdot[iMinor];
shared_Az[threadIdx.x] = temp.Az;
}
{
T3 T_ = p_T_minor[iMinor];
shared_T[threadIdx.x].Te = T_.Te;
shared_T[threadIdx.x].Ti = T_.Ti;
}
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
AAdot temp = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
shared_Az_verts[threadIdx.x] = temp.Az;
T3 T_ = p_T_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_T_verts[threadIdx.x].Te = T_.Te;
shared_T_verts[threadIdx.x].Ti = T_.Ti; // MOVED THIS OUT OF the following branch to see it match CPU
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
}
else {
// save several bus trips;
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
//shared_T_verts[threadIdx.x].Te = 0.0;
//shared_T_verts[threadIdx.x].Ti = 0.0;
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 ourAz, oppAz, prevAz, nextAz;
//f64 ourAzdot, oppAzdot, prevAzdot, nextAzdot;
f64_vec2 opppos, prevpos, nextpos, edge_normal;
T2 oppT, prevT, nextT;
//nvals our_n, opp_n, prev_n, next_n;
f64_vec2 Our_integral_curl_Az, Our_integral_grad_Az, Our_integral_grad_Te;
f64 Our_integral_Lap_Az;
if (threadIdx.x < threadsPerTileMajor) {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64_vec3 MAR_ion, MAR_elec;
memcpy(&(MAR_ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(MAR_elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourAz = shared_Az_verts[threadIdx.x];
bool bPressure = bz_pressureflag[iVertex];
// True for DOMAIN_VERTEX, unless you've got a crossing_cath in which case it's false.
if (bPressure) {
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_T[izTri[iprev] - StartMinor];
prevAz = shared_Az[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izTri[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_T[izTri[i] - StartMinor];
oppAz = shared_Az[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
T3 opp_T = p_T_minor[izTri[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izTri[i]];
oppAz = temp.Az;
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
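// i.e. each corner of this vertex-centred minor cell is the average of the vertex position and the
// two neighbouring triangle-minor positions: endpt = (info.pos + opppos + prevpos)/3.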
short iend = tri_len;
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_T[izTri[inext] - StartMinor];
nextAz = shared_Az[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
T3 next_T = p_T_minor[izTri[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
AAdot temp = p_AAdot[izTri[inext]];
nextAz = temp.Az;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
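// edge_normal = (dy, -dx) for the directed edge endpt0 -> endpt1: the edge vector rotated 90 degrees
// clockwise, so its length equals the edge length and (for an anticlockwise walk around the cell) it
// points outward; summing f_edge * edge_normal therefore approximates the contour integral of f n-hat dl.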
// ______________________________________________________-
T2 T0, T1;
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T_verts[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T_verts[threadIdx.x].Ti + oppT.Ti);
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// So this is pretty stupid ---
// If shardmodel went for flat then we have decided that there is no pressure gradient affecting v here.
// Mind you we didn't expect it to be flat nearly as often as it is flat.
// Think carefully about what pressure we want to feel.
// It makes a kind of sense if you have a cliff of density then you feel it in the triangle in between.
// ***************************************************************************************
// But that won't push points apart. It just sends stuff through the wall.
// ***************************************************************************************
// Hmm.
// It's a shame we can't just use actual n values to infer gradient over a region.
// It probably creates wobbles in v as well, because if we move fast particles at edge then we leave
// Behind a still-lower v in the vertex-centered minor. <-- yes, this instability is clear in practice.
// The scheme is kind of skewiffifying.
// Assume neighs 0,1 are relevant to border with tri 0 minor
// To get integral grad we add the averages along the edges times edge_normals
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
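// Worked form of the two lines above: the pressure contribution to the momentum-addition rate is
//    MAR_species -= (1/m_species) * contour integral of (n T_species) n-hat dl
// over the minor-cell boundary, with the integrand on each edge taken as the trapezium average of the
// endpoint values, 0.5*(n0*T0 + n1*T1), and edge_normal already carrying the edge length.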
if (TESTPRESSUREY) {
printf("Pressure vertex %d MAR_ion.y %1.9E contrib.y %1.9E n0 %1.9E Ti0 %1.9E n1 %1.9E Ti1 %1.9E edge_normal.y %1.9E \n",
VERTCHOSEN, MAR_ion.y,
-0.5*(n0*T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
n0, T0.Ti, n1, T1.Ti, edge_normal.y);
}
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
// if (iVertex + BEGINNING_OF_CENTRAL == CHOSEN)
// printf("GPU %d : GradTe contrib %1.14E %1.14E Te %1.14E opp %1.14E next %1.14E prev %1.14E edge_normal %1.14E %1.14E\n", iVertex + BEGINNING_OF_CENTRAL,
// 0.5*(T0.Te + T1.Te) * edge_normal.x,
//0.5*(T0.Te + T1.Te) * edge_normal.y,
// shared_T_verts[threadIdx.x].Te, oppT.Te, nextT.Te, prevT.Te,
//edge_normal.x, edge_normal.y);
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
// Introduced minus because we otherwise are getting negative of curl.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
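// Worked restatement of the three accumulations above:
// * Az_edge: each edge endpoint carries Az ~ (ours + opp + prev)/3 or (ours + opp + next)/3, and the
//   average of the two is (2*ours + 2*opp + prev + next)/6 -- hence the SIXTH weighting.
// * curl: B_xy = curl(Az zhat) = (dAz/dy, -dAz/dx); Green's theorem gives
//   integral of B_xy dA = -(contour integral of Az dl), and (endpt1 - endpt0) is dl for an
//   anticlockwise traversal, which is where the introduced minus comes from.
// * AreaMinor: A = contour integral of x dy, accumulated edge by edge as xbar * (delta y)
//   = 0.5*(endpt0.x + endpt1.x)*edge_normal.x.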
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevAz = oppAz;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
oppT = nextT;
}; // next i
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Az / AreaMinor;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = Our_integral_grad_Te / AreaMinor;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &MAR_ion, sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &MAR_elec, sizeof(f64_vec3));
} else {
Vector2 zero(0.0, 0.0);
p_GradAz[iVertex + BEGINNING_OF_CENTRAL] = zero;
p_GradTe[iVertex + BEGINNING_OF_CENTRAL] = zero;
p_B[iVertex + BEGINNING_OF_CENTRAL] = Make3(zero, BZ_CONSTANT);
// we certainly could still calculate B, though that was not how this was before.
}; // bPressure
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
ourAz = shared_Az[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// T2 prevT, nextT, oppT;
//f64 prevAz, nextAz, oppAz, ourAz;
//f64 prevAzdot, nextAzdot, oppAzdot, ourAzdot;
f64_vec3 MAR_ion,MAR_elec;
// This is not a clever way of doing it. Want something more careful.
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
if ((izNeighMinor[3] >= StartMinor) && (izNeighMinor[3] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[3] - StartMinor];
} else {
AAdot temp = p_AAdot[izNeighMinor[3]];
oppAz = temp.Az;
};
// p_LapAz[iMinor] = oppAz - ourAz; // OBSOLETE ---- need to delete this from routine.
p_GradAz[iMinor] = Vector2(0.0, 0.0);
memset(&(p_B[iMinor]), 0, sizeof(f64_vec3));
p_GradTe[iMinor] = Vector2(0.0, 0.0);
// p_AreaMinor[iMinor] = 1.0e-12;
memset(&(p_MAR_ion[iMinor]), 0, sizeof(f64_vec3));
memset(&(p_MAR_elec[iMinor]), 0, sizeof(f64_vec3));
} else {
Our_integral_curl_Az.x = 0.0;
Our_integral_curl_Az.y = 0.0;
Our_integral_grad_Az.x = 0.0;
Our_integral_grad_Az.y = 0.0;
Our_integral_grad_Te.x = 0.0;
Our_integral_grad_Te.y = 0.0;
Our_integral_Lap_Az = 0.0;
f64 AreaMinor = 0.0;
f64 AreaMinor_for_A = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
memcpy(&MAR_ion, p_MAR_ion + iMinor, sizeof(f64_vec3));
memcpy(&MAR_elec, p_MAR_elec + iMinor, sizeof(f64_vec3));
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prevT = shared_T[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_T_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 prev_T = p_T_minor[izNeighMinor[iprev]];
prevT.Te = prev_T.Te; prevT.Ti = prev_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
oppT = shared_T[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_T_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT.Te = opp_T.Te; oppT.Ti = opp_T.Ti;
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// At each corner we want to pick up 3 values from n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
// indexminor sequence:
// 0 = corner 0
// 1 = neighbour 2
// 2 = corner 1
// 3 = neighbour 0
// 4 = corner 2
// 5 = neighbour 1
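// Each n_array entry is an interpolated n used below as an endpoint value (n0 / n1) for the matching
// minor edge: each is THIRD*(n[who_am_I] + n[an adjacent wedge] + n_cent) from one corner vertex's
// shard model, mirroring the centroid-style interpolation used for the vertex cells above.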
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
// *** Pathological case: OUTERMOST vertex, where neigh_len is not correct to take as == tri_len. ***
// [0] is on our clockwise side rel to [1]. That means it is anticlockwise for the vertex.
// That means we interpolate with the value from next tri around.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 01A n_array 01 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[0], n_array[1], cornerindex.i1,
shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
// the first two entries
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
// n1 goes with "prev" -- did I do that on purpose?
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 01B n_array 01 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[0], n_array[1], cornerindex.i1,
p_n_shards[cornerindex.i1].n[who_am_I],
p_n_shards[cornerindex.i1].n_cent);
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 23A n_array 23 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[2], n_array[3], cornerindex.i2,
shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 23B n_array 23 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[2], n_array[3], cornerindex.i2,
p_n_shards[cornerindex.i2].n[who_am_I],
p_n_shards[cornerindex.i2].n_cent);
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
if (TESTTRI)
printf("%d 45A n_array 45 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[4], n_array[5], cornerindex.i3,
shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I],
shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
if (TESTTRI)
printf("%d 45B n_array 45 %1.9E %1.9E corner %d n_shards[who_am_I] %1.9E n_cent %1.9E",
iMinor, n_array[4], n_array[5], cornerindex.i3,
p_n_shards[cornerindex.i3].n[who_am_I],
p_n_shards[cornerindex.i3].n_cent);
//This matches a diagram:
//
// 2---(4)----(3)---1 = corner 1 = indexminor 2: (2,3)
// \ / \ /
// \/ \ /
// (5\ (2/ indexminor 1 = neighbour 2: (1,2)
// \ /
// \0)--(1/
// \ _/
// 0 = corner 0 = indexminor0
};
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextT = shared_T[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_T_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
T3 next_T = p_T_minor[izNeighMinor[inext]];
nextT.Te = next_T.Te; nextT.Ti = next_T.Ti;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor_for_A += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
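			// edge_normal = (dy, -dx) is the edge normal scaled by the edge length, so summing
			// 0.5*(endpt0.x + endpt1.x)*edge_normal.x around the closed boundary is the shoelace
			// (divergence-theorem) formula for the cell area.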
//integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
//integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
//f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
////f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
////if ((i % 2 == 0) || ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
//if ( (opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
// (opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
//// Modified here ..
T3 T0, T1; // waste of registers
f64 n1;
T0.Te = THIRD* (prevT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T1.Te = THIRD * (nextT.Te + shared_T[threadIdx.x].Te + oppT.Te);
T0.Ti = THIRD * (prevT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
T1.Ti = THIRD * (nextT.Ti + shared_T[threadIdx.x].Ti + oppT.Ti);
n0 = n_array[i];
n1 = n_array[inext]; // !
// To get integral grad we add the averages along the edges times edge_normals
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// typical edge
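					// Pressure term: d(Nv_s)/dt -= (1/m_s) * (contour integral of n*T_s over the cell boundary);
					// per edge this is approximated by 0.5*(n0*T0 + n1*T1)*(1/m_s)*edge_normal, with n and T
					// interpolated to the two edge endpoints.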
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// set nT on the edge: try just the average of the two nT, weighted by distance to own centre.
// Recall periodic when we look at distance to own centre.
f64 nTi_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti + p_n_minor[izNeighMinor[i]].n*oppT.Ti);
f64 nTe_edge = 0.5*(p_n_minor[iMinor].n*shared_T[threadIdx.x].Te + p_n_minor[izNeighMinor[i]].n*oppT.Te);
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(shared_T[threadIdx.x].Te + oppT.Te) * edge_normal;
} else {
					// Looking out of the bottom of the insulator-crossing triangle at a within-insulator vertex or triangle,
					// so we want to project the point up to the insulator.
					// Use prevpos, nextpos to determine what we are looking at? We can't; we need the flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
f64 nTi_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Ti;
f64 nTe_edge = p_n_minor[iMinor].n*shared_T[threadIdx.x].Te;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
MAR_ion -= Make3(nTi_edge*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(nTe_edge*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += shared_T[threadIdx.x].Te * edge_normal;
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
}; // domain triangle opposite or not
} else {
// Typical tri.
MAR_ion -= Make3(0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal, 0.0);
MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
Our_integral_grad_Te += 0.5*(T0.Te + T1.Te) * edge_normal;
};
if (TESTTRI) {
printf("pressure %d : contribs MAR_ion.x %1.11E MAR_elec.x %1.11E \n"
"contribs MAR_ion.y %1.11E MAR_elec.y %1.11E \n"
"n0 %1.10E n1 %1.10E Ti0 %1.10E Ti1 %1.10E edgenormal %1.9E %1.9E\n",
iMinor,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.x,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.x,
-0.5*(n0 * T0.Ti + n1 * T1.Ti)*over_m_i*edge_normal.y,
-0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal.y,
n0, n1, T0.Ti, T1.Ti, edge_normal.x, edge_normal.y);
}
// if (Az_edge != Az_edge) {
// printf("GPU : %d : Az_edge %1.9E ourAz %1.9E oppAz %1.9E \n ourintegralgradTe %1.9E %1.9E contrib %1.9E %1.9E T01 %1.9E %1.9E edgenormal %1.9E %1.9E\n"
// "prevT.Te %1.9E ourT.Te %1.9E oppT.Te %1.9E nextT.Te %1.9E \n",
// iMinor, Az_edge, ourAz, oppAz,
// Our_integral_grad_Te.x, Our_integral_grad_Te.y,
// 0.5*(T0.Te + T1.Te) * edge_normal.x, 0.5*(T0.Te + T1.Te) * edge_normal.y,
// T0.Te, T1.Te, edge_normal.x, edge_normal.y,
// prevT.Te, shared_T[threadIdx.x].Te,oppT.Te,nextT.Te
// );
// }
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
//
// if ((TESTTRI))
// printf("GPU AreaMinor %d : %1.14E from += %1.14E : endpt0.x %1.14E endpt1.x %1.14E edge_normal.x %1.14E\n"
// "endpt1.y endpt0.y %1.14E %1.14E \n",
// iMinor, AreaMinor, (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x,
// endpt0.x, endpt1.x, edge_normal.x,
// endpt1.y, endpt0.y);
// See a way that FP accuracy was eroded: we take a difference of two close things already to get edge_normal.
// can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
n0 = n1;
iprev = i;
prevpos = opppos;
prevAz = oppAz;
// prevAzdot = oppAzdot;
prevT = oppT;
opppos = nextpos;
oppAz = nextAz;
// oppAzdot = nextAzdot;
oppT = nextT;
};
// No setting a_r = 0
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor_for_A;
p_GradTe[iMinor] = Our_integral_grad_Te / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor_for_A, BZ_CONSTANT);
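		// GradAz and GradTe are contour integrals over the minor cell divided by its area:
		// grad f ~= (1/A) * sum over edges of f_edge * edge_normal. B_xy = curl(Az zhat)
		// ~= -(1/A) * sum of Az_edge*(endpt1 - endpt0), hence the -= above; Bz is the imposed BZ_CONSTANT.
		// Note AreaMinor_for_A is accumulated with the unclipped edge endpoints, while AreaMinor
		// uses the endpoints after any projection to the insulator.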
// p_AreaMinor[iMinor] = AreaMinor;
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iMinor, &(MAR_ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iMinor, &(MAR_elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
// We do not need B or Grad A outside of the domain. !
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevAz = shared_Az[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevAz = shared_Az_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[iprev]];
prevAz = temp.Az;
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
oppAz = shared_Az[izNeighMinor[i] - StartMinor];
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
oppAz = shared_Az_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[i]];
oppAz = temp.Az;
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextAz = shared_Az[izNeighMinor[inext] - StartMinor];
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextAz = shared_Az_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
AAdot temp = p_AAdot[izNeighMinor[inext]];
nextAz = temp.Az;
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// Az_edge = SIXTH * (2.0*ourdata.Az + 2.0*oppAz + prevAz + nextAz);
// Our_integral_grad_Az += Az_edge * edge_normal;
// Our_integral_curl_Az += Az_edge * (endpt1 - endpt0); // looks anticlockwise
// integ_grad_Az.x = 0.5*(
// (ourAz + nextAz)*(info.pos.y - nextpos.y)
// + (prevAz + ourAz)*(prevpos.y - info.pos.y)
// + (oppAz + prevAz)*(opppos.y - prevpos.y)
// + (nextAz + oppAz)*(nextpos.y - opppos.y)
// );
// integ_grad_Az.y = -0.5*( // notice minus
// (ourAz + nextAz)*(info.pos.x - nextpos.x)
// + (prevAz + ourAz)*(prevpos.x - info.pos.x)
// + (oppAz + prevAz)*(opppos.x - prevpos.x)
// + (nextAz + oppAz)*(nextpos.x - opppos.x)
// );
// f64 area_quadrilateral = 0.5*(
// (info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
// + (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
// + (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
// + (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
// );
// //f64_vec2 grad_Az = integ_grad_Az / area_quadrilateral;
//// if ((i % 2 == 0) || // vertex neigh
//// ((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
// if ((opppos.dot(opppos) < 0.9999*0.9999*FRILL_CENTROID_OUTER_RADIUS_d*FRILL_CENTROID_OUTER_RADIUS_d) &&
// (opppos.dot(opppos) > 1.0001*1.0001*FRILL_CENTROID_INNER_RADIUS_d*FRILL_CENTROID_INNER_RADIUS_d))
// Our_integral_Lap_Az += integ_grad_Az.dot(edge_normal) / area_quadrilateral;
f64 Az_edge = SIXTH * (2.0*ourAz + 2.0*oppAz + prevAz + nextAz);
Our_integral_grad_Az += Az_edge * edge_normal;
Our_integral_curl_Az -= Az_edge * (endpt1 - endpt0);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevAz = oppAz;
opppos = nextpos;
oppAz = nextAz;
};
p_GradAz[iMinor] = Our_integral_grad_Az / AreaMinor;
// p_LapAz[iMinor] = Our_integral_Lap_Az / AreaMinor;
p_B[iMinor] = Make3(Our_integral_curl_Az / AreaMinor, BZ_CONSTANT);
// p_AreaMinor[iMinor] = AreaMinor;
} // non-domain tri
}; // was it FRILL
// Okay. While we have n_shards in memory we could proceed to overwrite with vxy.
// But get running first before using union and checking same.
}
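
// Hypothetical illustrative helper (a sketch only; not called by the kernels in this file): the
// linear interpolation used repeatedly above and below to pull an edge endpoint back onto the
// insulator radius. Radius varies almost linearly along the short chord between the two points,
// so we solve r(lambda) = DEVICE_RADIUS_INSULATOR_OUTER linearly in lambda.
__device__ __forceinline__ f64_vec2 Sketch_PullBackToInsulator(f64_vec2 const below, f64_vec2 const above)
{
	f64 r_below = below.modulus();
	f64 r_above = above.modulus();
	// fraction of the way from 'below' to 'above' at which radius equals the insulator radius:
	f64 lambda = (DEVICE_RADIUS_INSULATOR_OUTER - r_below) / (r_above - r_below);
	return below + lambda*(above - below);
}

// kernelCreate_momflux_minor: accumulates the advective momentum-flux contributions to d(Nv)/dt
// (p_MAR_ion, p_MAR_elec) for vertex minors and triangle minors, using the shard-model densities
// at the edge endpoints and the velocity relative to the moving mesh (v - v_overall).
// Upwind cell values are carried across each edge; at the insulator, the radial velocity
// component is reflected and the corresponding kinetic energy is routed into NT_addition_tri as heat.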
__global__ void kernelCreate_momflux_minor(
structural * __restrict__ p_info_minor,
v4 * __restrict__ p_vie_minor,
f64_vec2 * __restrict__ p_v_overall_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_MAR_neut,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor,
NTrates * __restrict__ NT_addition_tri // inevitable
)
{
__shared__ v4 shared_vie[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful when threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vie[threadIdx.x] = p_vie_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4)); // this was always a bug as long as we had traffic near outermost!
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2)); // it actually is zero at outermost
if (info.flag == OUTERMOST)
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
};
};
__syncthreads();
v4 our_v, opp_v, prev_v, next_v;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
if (threadIdx.x < threadsPerTileMajor) {
three_vec3 ownrates;
memcpy(&(ownrates.ion), &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
memcpy(&(ownrates.elec), &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_vie_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64 vez0, viz0, vez1, viz1;
f64_vec2 vxy0, vxy1, endpt1, edge_normal;
short iend = tri_len;
// We deal with DOMAIN_VERTEX only!!
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
next_v_overall = Anticlockwise_d*next_v_overall;
}
			endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
// Assume neighs 0,1 are relevant to border with tri 0 minor.
// *********
// Verify that tri 0 is formed from our vertex, neigh 0 and neigh 1; - tick I think
// *********
vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
vxy1 = THIRD * (our_v.vxy + opp_v.vxy + next_v.vxy);
vez0 = THIRD * (our_v.vez + opp_v.vez + prev_v.vez);// not used?
viz0 = THIRD * (our_v.viz + opp_v.viz + prev_v.viz);// not used?
vez1 = THIRD * (our_v.vez + opp_v.vez + next_v.vez);// not used?
viz1 = THIRD * (our_v.viz + opp_v.viz + next_v.viz); // not used?
f64 relvnormal = 0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
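			// relvnormal: the edge-averaged fluid velocity minus the local mesh velocity, dotted with
			// edge_normal. |edge_normal| equals the edge length, so this is the 2D volume flux per unit
			// time through the edge in the moving-mesh frame; positive means outflow from this minor cell.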
// In reasonable conditions I suppose that is something sensible.
// However if we use n v_edge relvnormal then from a fast upwind cell we are always ejecting the slowest material!
// That is unstable.
// We could profitably create a minmod model of velocity.
// However for now let's try pretending there is a shock front (so use average v for advection) and the upwind nv
// to advect is just the upwind cell average.
// FIX FOR NOW, 22/11/20 :
// We do not allow traffic from insulator-crossing triangles to/from vertex minors.
// This is because we can't have an intermediate cell of momentum within a density cell that has only one end.
// ===========================================================================================================
int neighflag = p_info_minor[izTri[i]].flag;
if (neighflag == DOMAIN_TRIANGLE) {
if (relvnormal > 0.0) {
// losing stuff
ownrates.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
if (TESTADVECTZ) {
printf("GPUadvect %d izTri[%d] %d USING our vez %1.9E [ oppvez %1.9E ] relvnormaldot %1.9E \n"
"gaining mom %1.9E | n0 %1.9E n1 %1.9E n %1.9E ncent %1.9E edge_normal %1.8E %1.8E vuse %1.8E %1.8E\n",
iVertex, i, izTri[i], our_v.vez, opp_v.vez, relvnormal,
-0.5*relvnormal*(n0 + n1)*our_v.vez, n0, n1, 0.5*(n0 + n1), shared_n_shards[threadIdx.x].n_cent,
edge_normal.x, edge_normal.y,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).x,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).y);
};
}
else {
ownrates.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
if (TESTADVECTZ) {
printf("GPUadvect %d izTri[%d] %d our vez %1.9E [USING oppvez %1.9E ] relvnormaldot %1.9E \n"
"gaining mom %1.9E | n0 %1.9E n1 %1.9E n %1.9E ncent %1.9E edge_normal %1.8E %1.8E vuse %1.8E %1.8E\n",
VERTCHOSEN, i, izTri[i], our_v.vez, opp_v.vez, relvnormal,
-0.5*relvnormal*(n0 + n1)*opp_v.vez, n0, n1, 0.5*(n0 + n1), shared_n_shards[threadIdx.x].n_cent,
edge_normal.x, edge_normal.y,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).x,
0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))).y);
};
//We are using upwind v ... however, n0 came from ourselves because we look out of our own minor into a triangle.
					// Why the minus sign? relvnormal was negative, but we gain a positive amount of opp_v.
};
};
// vie.vez = (vie_k.vez*Nk + h_use * MAR.z) / Nplus;
// OLD, unstable :
//ownrates.ion -= 0.5*relvnormal*(n0 *(Make3(vxy0 - our_v.vxy, viz0 - our_v.viz) + n1*(Make3(vxy1 - our_v.vxy, viz1 - our_v.viz))));
if (TESTADVECT) {
printf("GPUadvect %d izTri[%d] %d ownrates.ion.y %1.9E contrib.y %1.9E %1.9E [ours,>0 out,<0] \n"
"relvnormal %1.10E n0 %1.9E n1 %1.9E vxy0.y %1.8E vxy1.y %1.8E\n"
"edge_normal %1.8E %1.8E our_v.y %1.8E opp_v.y %1.8E prev_v.y %1.8E next_v.y %1.8E\n",
VERTCHOSEN, i, izTri[i], ownrates.ion.y,
-0.5*relvnormal*(n0 + n1)*our_v.vxy.y, -0.5*relvnormal*(n0 + n1)*opp_v.vxy.y,
relvnormal, n0, n1, vxy0.y, vxy1.y,
edge_normal.x, edge_normal.y,
our_v.vxy.y, opp_v.vxy.y, prev_v.vxy.y, next_v.vxy.y);
};
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
// Somehow we've created an unstable situ. We are chucking out high-nv at the top. higher n and lower v than in our triangle.
// Should we insist on upwind v as what is carried?
//
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
}; // next i
// AreaMinor is not saved, or even calculated for tris.
// No neutral stuff in this kernel, momrates should be set now:
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &(ownrates.ion), sizeof(f64_vec3));
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &(ownrates.elec), sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
our_v = shared_vie[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Why the apparently stupid choice to make another variable? :
three_vec3 ownrates_minor;
memcpy(&(ownrates_minor.ion), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memcpy(&(ownrates_minor.elec), &(p_MAR_elec[iMinor]), sizeof(f64_vec3));
f64 vez0, viz0, viz1, vez1;
f64_vec2 vxy0, vxy1;
if (TESTTRI) printf("iMinor %d info.flag %d \nXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\n",
iMinor, info.flag);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
if (TESTTRI) printf("iMinor %d info.flag %d \nxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\n",
iMinor, info.flag);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
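		// n_array layout: entries 0-1 straddle corner i1, entries 2-3 straddle corner i2, entries 4-5
		// straddle corner i3 (compare the diagram in the pressure kernel above). Each entry is
		// THIRD*(neighbouring shard value + n[who_am_I] + n_cent), i.e. the shard-model density at one
		// of the six edge endpoints of this triangle's minor cell; below, n0 = n_array[i], n1 = n_array[inext].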
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
// Worry about pathological cases later.
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
} else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ? tick
// Assume neighs 0,1 are relevant to border with tri 0 minor.
vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
vxy1 = THIRD * (our_v.vxy + opp_v.vxy + next_v.vxy);
vez0 = THIRD * (our_v.vez + opp_v.vez + prev_v.vez);
viz0 = THIRD * (our_v.viz + opp_v.viz + prev_v.viz);
vez1 = THIRD * (our_v.vez + opp_v.vez + next_v.vez);
viz1 = THIRD * (our_v.viz + opp_v.viz + next_v.viz); // Not used for anything, apparently.
f64 relvnormal = 0.5*(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// Note that average instead of upwind, is of course unstable.
// FIX FOR NOW, 22/11/20 :
// We do not allow traffic from insulator-crossing triangles to/from vertex minors.
// This is because we can't have an intermediate cell of momentum within a density cell that has only one end.
// ===========================================================================================================
if (izNeighMinor[i] < BEGINNING_OF_CENTRAL) // triangle
{
if (relvnormal > 0.0) {
// losing stuff n
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
}
else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
						// Why the minus sign? relvnormal was negative, but we gain a positive amount of opp_v.
};
};
} else {
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 v_overall0, v_overall1;
v_overall0 = THIRD * (our_v_overall + prev_v_overall + opp_v_overall);
v_overall1 = THIRD * (our_v_overall + next_v_overall + opp_v_overall);
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
// endpt1 is defined in this way, so its motion must be defined accordingly.
// The v_overall of the below-insulator point is actually 0.
f64 r3 = nextpos.modulus();
v_overall1 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r1 - r3))*v_overall0;
// but has no radial component:
v_overall1 -= (v_overall1.dot(endpt1)) / (endpt1.dot(endpt1))*endpt1;
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
f64 r3 = prevpos.modulus();
v_overall0 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r2 - r3))*v_overall1;
// but has no radial component:
						v_overall0 -= (v_overall0.dot(endpt0)) / (endpt0.dot(endpt0))*endpt0;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// have not yet handled how to do momflux between two CROSSING_INS tris.
// the above vxy1 etc will be invalid because of taking data from insulator points.
// Does that mean we will get weird effects? Probably. Have to think here then.
// Reset relvnormal:
if (prev_v.vez == 0.0) vxy0 = 0.5*(our_v.vxy + opp_v.vxy);
if (next_v.vez == 0.0) vxy1 = 0.5*(our_v.vxy + opp_v.vxy);
//vxy0 = THIRD * (our_v.vxy + prev_v.vxy + opp_v.vxy);
if (n0 == 0.0) // generated from shardmodel from inside the insulator, then it should come out 0.
n0 = 0.5*(p_n_minor[iMinor].n + p_n_minor[izNeighMinor[i]].n);
if (n1 == 0.0)
n1 = 0.5*(p_n_minor[iMinor].n + p_n_minor[izNeighMinor[i]].n);
relvnormal = 0.5*(vxy0 + vxy1 - v_overall0 - v_overall1).dot(edge_normal);
if (relvnormal > 0.0) {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
} else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
};
} else {
					// Looking down into the insulator.
					// Use prevpos, nextpos to determine what we are looking at? We can't; we need the flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
int debugprevflag = 0, debugnextflag = 0;
f64_vec2 endpt0store, endpt1store;
endpt0store = endpt0;
endpt1store = endpt1;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
debugprevflag = 1;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
debugnextflag = 1;
} else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
};
// will be a 0 contribution if endpt1 = endpt0, that's ok.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// should be facing towards (0,0).
// Insulator arc isn't moving, no v_overall.
relvnormal = our_v.vxy.dot(edge_normal);
if (relvnormal > 0.0) {
f64 n_edge = p_n_minor[iMinor].n;
// Only the vr component is reversed!!!
// f64 vr = -our_v.vxy.dot(edge_normal) / edge_normal.modulus();
// rhat = -edge_normal/edge_normal.modulus();
// v-= vr rhat
f64_vec2 vr_rhat = edge_normal*((our_v.vxy.dot(edge_normal)) /
(edge_normal.dot(edge_normal)));
// positive amt * negative r vector = negative amt * positive r vector.
f64 vr_squared = our_v.vxy.dot(edge_normal)*our_v.vxy.dot(edge_normal) /
edge_normal.dot(edge_normal);
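						// Specular reflection at the insulator arc: only the radial component of vxy is reversed.
						// The particle flux out through this edge is n_edge*relvnormal, so d(Nv)/dt loses
						// 2*relvnormal*n_edge*vr_rhat, and the bulk kinetic energy removed by the reversal is
						// returned as heat via NT_addition_tri below (1.5*d(NT)/dt = m*vr_squared*n_edge*relvnormal per species).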
ownrates_minor.ion -= 2.0*relvnormal*n_edge*Make3(vr_rhat,0.0);
ownrates_minor.elec -= 2.0*relvnormal*n_edge*Make3(vr_rhat, 0.0);
// Now add heat:
NTrates dNT = NT_addition_tri[iMinor];
// change in 0.5 Nmvv = 0.5mv d/dt(Nv) = m*vr*vr*n_edge*relvnormal since v dot vr rhat = vr^2
// change in 1.5 NT should cancel this.
dNT.NiTi += 0.6666666666667*m_i*vr_squared*n_edge*relvnormal;
dNT.NeTe += 0.6666666666667*m_e*vr_squared*n_edge*relvnormal;
// printf("iMinor %d dNiTi %1.9E cont %1.9E vr_squared %1.9E n %1.8E relvn %1.8E our_v %1.8E %1.8E \n"
// "debugflags %d %d endpt0 %1.8E %1.8E endpt1 %1.8E %1.8E previously %1.8E %1.8E, %1.8E %1.8E edgenormal %1.8E %1.8E \n",
// iMinor, dNT.NiTi, 0.6666666666667*vr_squared*n_edge*relvnormal,
// vr_squared, n_edge, relvnormal, our_v.vxy.x, our_v.vxy.y,
// debugprevflag, debugnextflag, endpt0.x, endpt0.y, endpt1.x, endpt1.y,
// endpt0store.x, endpt0store.y, endpt1store.x, endpt1store.y, edge_normal.x, edge_normal.y);
NT_addition_tri[iMinor] = dNT;
};
// If we are pulling away from the ins, do nothing!
};
};
} else {
// Typical edge.
if (relvnormal > 0.0) {
// losing stuff
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(our_v.vxy, our_v.vez);
}
else {
ownrates_minor.ion -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.viz);
ownrates_minor.elec -= 0.5*relvnormal*(n0 + n1)*Make3(opp_v.vxy, opp_v.vez);
				// Why the minus sign? relvnormal was negative, but we gain a positive amount of opp_v.
};
};
if (((TESTTRI)))
printf("advectiveGPU %d i %d iznm %d info.flag %d neigh.flag %d contrib %1.10E edge_nml %1.8E %1.8E\n"
"relvnormal %1.10E v_use %1.9E %1.9E n0 %1.12E n1 %1.12E our_vez %1.10E opp_vez %1.10E\n"
"~~~&~&~&~&~&~&~&~&~&~&~&~&~&~&~&~&&~&~&~&~&~&~&~&~&~&~~~\n",
CHOSEN,i, izNeighMinor[i],
info.flag, p_info_minor[izNeighMinor[i]].flag,
-0.5*relvnormal*(n0 + n1)*((relvnormal>0.0)?our_v.vez:opp_v.vez),
edge_normal.x, edge_normal.y,
relvnormal,
(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).x,
(vxy0 + vxy1
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).y,
n0, n1, our_v.vez, opp_v.vez);
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
memcpy(&(p_MAR_ion[iMinor]), &(ownrates_minor.ion), sizeof(f64_vec3));
memcpy(&(p_MAR_elec[iMinor]), &(ownrates_minor.elec), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}
// Not optimized: !!
#define FACTOR_HALL (1.0/0.96)
#define FACTOR_PERP (1.2/0.96)
//#define DEBUGNANS
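// kernelCalculate_deps_WRT_beta_Visc: used by the implicit viscosity solve. It applies the same
// viscous-stress discretisation as the main viscosity kernel, but to the Jacobi regressor fields
// (ion pass first, see "IONS FIRST" below), returning d(epsilon)/d(beta) for the residual
// epsilon = v - v_k - (hsub/N)*MAR_visc(v) along the search direction v -> v + beta*Jacobi.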
__global__ void kernelCalculate_deps_WRT_beta_Visc(
f64 const hsub,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_elec_minor, // nT / nu ready to look up
f64_vec3 * __restrict__ p_B_minor,
nvals * __restrict__ p_n_minor, // got this
f64 * __restrict__ p_AreaMinor, // got this -> N, Nn
f64_vec3 * __restrict__ p_Jacobi_ion,
f64_vec3 * __restrict__ p_Jacobi_elec,
f64_vec3 * __restrict__ p_d_eps_by_d_beta_i_,
f64_vec3 * __restrict__ p_d_eps_by_d_beta_e_
)
{
// We only need 3 in shared now, can re-do when we do elec
__shared__ f64_vec3 shared_vJ[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_vJ_verts[threadsPerTileMajor]; // load & reload in Jacobi regressor v instead of v
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
	// Putting some stuff in shared memory may speed things up if there are spills.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
	long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful when threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64_vec3 our_v, opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 d_eps_by_d_beta;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vJ[threadIdx.x] = p_Jacobi_ion[iMinor]; // is memcpy faster or slower than operator= ?
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_vJ_verts[threadIdx.x]), &(p_Jacobi_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_vJ_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
// IONS FIRST:
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
our_v = shared_vJ_verts[threadIdx.x]; // optimization: use replace or #define to get rid of storing this again.
d_eps_by_d_beta = our_v; // eps = v_k+1 - v_k - h/N MAR
// Rate of change of eps_x = Jacobi_x
f64 Factor = hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_ion);
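		// Factor = hsub/(N*m_ion) with N = n*AreaMinor. Each edge below adds Factor*(Pi(Jacobi) . edge_normal),
		// which is -(hsub/N) times that edge's contribution to d(MAR)/d(beta), since MAR gains
		// -(1/m_ion)*Pi.edge_normal per edge.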
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vJ[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_Jacobi_ion[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vJ[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_Jacobi_ion[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
// Now sort out anticlock vars:
{
f64_vec2 opp_B;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
f64 ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
f64 nu_theirs = p_nu_ion_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vJ[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_Jacobi_ion[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64_vec2 gradvx, gradvy, gradviz;
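				// Gradients are estimated via Green's theorem over the quadrilateral with vertices
				// info.pos, prevpos, opppos, nextpos: grad f ~= (1/A) * (contour integral of f * n dl),
				// where A is the shoelace area computed next.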
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
//
// if (TEST) printf(
// "iVertex %d our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E\n"
// "area_quad %1.8E \n"
// "info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
// iVertex, our_v.vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
// area_quadrilateral,
// info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
//
gradviz.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
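					// Unmagnetised branch: Pi is the isotropic viscous stress
					// Pi_ab = -ita_par*( d_a v_b + d_b v_a - (2/3)*delta_ab*div v ), with all z-derivatives zero;
					// the viscous flux of a-momentum through this edge is (Pi . edge_normal)_a.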
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
//visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y); // - h/N visc_contrib I think
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
else {
f64 omegamod;
f64_vec3 unit_b, unit_perp, unit_Hall;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
// ita_perp = FACTOR_PERP * ita_par * nu*nu / (omegasq + nu*nu);
// ita_cross = FACTOR_HALL * ita_par * nu*omegamod / (omegasq + nu*nu);
}
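// unit_b is along omega_ci (i.e. along B); unit_perp is the part of the edge normal
// perpendicular to b, renormalised; unit_Hall = b x perp completes the right-handed triad.
// Strain and stress are evaluated in this (b, perp, Hall) frame below.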
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
// but we can make do with 3x partials
// 2. Now get partials in magnetic coordinates
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
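// W_** now appears to hold the traceless rate-of-strain tensor in the (b, perp, Hall)
// frame: W_ab = dv_a/dx_b + dv_b/dx_a - (2/3) delta_ab div v, accumulated one
// coordinate direction at a time above.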
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
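// The Pi_*_* assembly above mirrors the magnetised (Braginskii-like) viscous stress:
// ita_par acts on W_bb, ita_1 and ita_2 damp the perpendicular components with
// nu^2/(nu^2 + omega^2)-type factors, and ita_3, ita_4 are the odd (gyroviscous) terms
// proportional to nu*omega/(nu^2 + omega^2). The six independent components of the
// symmetric stress are formed here.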
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// ownrates will be divided by N to give dv/dt
// visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
// ownrates_visc += visc_contrib;
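// Convert the flux from (b, perp, Hall) components back to x,y,z and accumulate into
// d_eps_by_d_beta. Factor = hsub / (n * AreaMinor * m), so this is presumably
// -(h/(N m)) times the viscous momentum flux through this edge, consistent with the
// epsilon used by the Jacobi / regressor solve.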
d_eps_by_d_beta.x += -Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y += -Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z += -Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
// We should have created device function for the visc calc since it is repeated now at least 8 times.
// Note that momflux here already had -, visc_contrib did not contain -over_m_i as for unmag.
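// A possible refactor (sketch only, names illustrative, nothing of this form exists in
// this file): the gradient -> W -> Pi -> momentum-flux chain is repeated for ions and
// electrons and for vertex and triangle cells, and could be hoisted into a device helper,
//   __device__ f64_vec3 MagnetisedViscousFlux(
//       const f64_vec2& gradvx, const f64_vec2& gradvy, const f64_vec2& gradvz,
//       const f64_vec3& omega, f64 nu, f64 ita_par, const f64_vec2& edge_normal);
// returning the x,y,z momentum flux through the edge, so each call site would keep only
// the neighbour gathering and the accumulation.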
}
}; // ita_par > 0.0
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
}; // next i
memcpy(p_d_eps_by_d_beta_i_ + iVertex + BEGINNING_OF_CENTRAL, &d_eps_by_d_beta, sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// Ion , triangle:
info = p_info_minor[iMinor];
our_v = shared_vJ[threadIdx.x];
d_eps_by_d_beta = our_v;
//if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
{
long izNeighMinor[6];
char szPBC[6];
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) && (shared_ita_par[threadIdx.x] > 0.0)) {
f64 Factor = hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_ion);
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vJ[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vJ_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_Jacobi_ion[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vJ[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vJ_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_Jacobi_ion[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ci;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vJ[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vJ_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_Jacobi_ion[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
// nu = 1.0e10; // DEBUG
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
} else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
} else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
} else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
}
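// omega_ci uses the average of our B and the neighbour's B (xy part) plus the constant
// axial field BZ_CONSTANT; qoverMc is presumably q_ion/(m_ion c), making this the ion
// cyclotron frequency vector on this edge.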
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64_vec2 gradvx, gradvy, gradviz;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
// Screen out looking out into insulator:
// Not really needed since we did bUsableSide, but let's leave it in for now just to be delicate.
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
} else {
// DO NOTHING -- no additions
}
} else {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
};
}
}; // bUsableSide
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
};
memcpy(&(p_d_eps_by_d_beta_i_[iMinor]), &(d_eps_by_d_beta), sizeof(f64_vec3));
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
// OVERWRITE REGRESSOR
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
shared_vJ[threadIdx.x] = p_Jacobi_elec[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_vJ_verts[threadIdx.x] = p_Jacobi_elec[iVertex + BEGINNING_OF_CENTRAL];
} else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
memset(&(shared_vJ_verts[threadIdx.x]), 0, sizeof(f64_vec3));
};
};
__syncthreads();
if (threadIdx.x < threadsPerTileMajor) {
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len; // ?!
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_vJ_verts[threadIdx.x]; // optimization: use replace or #define to get rid of storing this again.
d_eps_by_d_beta = our_v;
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
f64 Factor = hsub / (p_n_minor[iVertex + BEGINNING_OF_CENTRAL].n * p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] * m_e);
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vJ[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prev_v = p_Jacobi_elec[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vJ[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opp_v = p_Jacobi_elec[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qovermc
}
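// Electron pass: same construction as the ion loop above, but with the electron Jacobi
// regressor, electron ita/nu and qovermc, giving omega_ce for this edge. As before, the
// smaller ita of the two adjoining cells (and its matching nu) is the one used.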
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vJ[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_Jacobi_elec[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64_vec2 gradvx, gradvy, gradvez;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
// Let's suppose, Pi_yx means the rate of flow of y-momentum in the x direction.
// Thus when we want to know how much y momentum is flowing through the wall we take
// Pi_yx.edge_x + Pi_yy.edge_y -- reasonable.
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
d_eps_by_d_beta.x += Factor*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y; // b component
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y; // P component
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y; // H component
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
};
};
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
}; // next i
memcpy(p_d_eps_by_d_beta_e_ + iVertex + BEGINNING_OF_CENTRAL, &d_eps_by_d_beta, sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) && (shared_ita_par[threadIdx.x] > 0.0)){
our_v = shared_vJ[threadIdx.x];
d_eps_by_d_beta = our_v;
f64 Factor = hsub / (p_n_minor[iMinor].n * p_AreaMinor[iMinor] * m_e);
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vJ[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vJ_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_Jacobi_elec[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vJ[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vJ_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_Jacobi_elec[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vJ[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vJ_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_Jacobi_elec[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qovermc
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (bUsableSide) {
// New definition of endpoint of minor edge:
f64_vec2 gradvez, gradvx, gradvy;
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(our_v.x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + our_v.x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(our_v.x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + our_v.x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(our_v.y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + our_v.y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(our_v.y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + our_v.y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(our_v.z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + our_v.z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(our_v.z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + our_v.z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x += Factor *(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor *(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor *(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
} else {
// DO NOTHING
}
} else {
d_eps_by_d_beta.x += Factor *(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
d_eps_by_d_beta.y += Factor *(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
d_eps_by_d_beta.z += Factor *(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
}
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
f64_vec2 edge_normal;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x; // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
} else {
// DO NOTHING
}
} else {
d_eps_by_d_beta.x -= Factor*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
d_eps_by_d_beta.y -= Factor*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
d_eps_by_d_beta.z -= Factor*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
}
}
}; // bUsableSide
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
opppos = nextpos;
opp_v = next_v;
};
memcpy(&(p_d_eps_by_d_beta_e_[iMinor]), &(d_eps_by_d_beta), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
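// The kernel below, unlike the d_eps_by_d_beta kernels above, appears to accumulate the
// actual viscous contributions: momentum addition rates into p_MAR_ion / p_MAR_elec and
// the corresponding viscous heating into the NT addition arrays, ions first.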
__global__ void
// __launch_bounds__(128) -- manual says that if max is less than 1 block, kernel launch will fail. Too bad huh.
kernelCreate_viscous_contrib_to_MAR_and_NT(
structural * __restrict__ p_info_minor,
v4 * __restrict__ p_vie_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_parallel_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_ita_parallel_elec_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_ion_minor, // nT / nu ready to look up
f64 * __restrict__ p_nu_elec_minor, // nT / nu ready to look up
f64_vec3 * __restrict__ p_B_minor,
f64_vec3 * __restrict__ p_MAR_ion,
f64_vec3 * __restrict__ p_MAR_elec,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ v4 shared_vie[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_B[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ v4 shared_vie_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_B_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// 4+2+2+1+1 *1.5 = 15 per thread. That is possibly as slow as having 24 per thread.
// Thus putting some stuff in shared may speed up if there are spills.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_vie[threadIdx.x] = p_vie_minor[iMinor];
shared_B[threadIdx.x] = p_B_minor[iMinor].xypart();
shared_ita_par[threadIdx.x] = p_ita_parallel_ion_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_ion_minor[iMinor];
// Perhaps the real answer is this. Advection and therefore advective momflux
// do not need to be recalculated very often at all. At 1e6 cm/s, we aim for 1 micron,
// get 1e-10s to actually do the advection !!
// So an outer cycle. Still limiting the number of total things in a minor tile. We might like 384 = 192*2.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_B_verts[threadIdx.x] = p_B_minor[iVertex + BEGINNING_OF_CENTRAL].xypart();
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_vie_verts[threadIdx.x]), &(p_vie_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(v4));
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_ion_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_vie_verts[threadIdx.x]), 0, sizeof(v4));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
// IONS FIRST:
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0) )
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
// DROP THIS ONE.
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
// short iend = tri_len;
//f64_vec2 projendpt0;
//if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
// iend = tri_len - 2;
// if (info.flag == OUTERMOST) {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
// }
// else {
// endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
// }
// edge_normal.x = endpt0.y - projendpt0.y;
// edge_normal.y = projendpt0.x - endpt0.x;
// AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
//};
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
f64_vec2 gradvx, gradvy, gradviz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
// Order of calculations may help things to go out/into scope at the right times so careful with that.
//f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
f64_vec3 omega_ci;
{
f64_vec2 opp_B;
f64 ita_theirs, nu_theirs;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
ita_theirs = shared_ita_par[izTri[i] - StartMinor];
nu_theirs = shared_nu[izTri[i] - StartMinor];
} else {
opp_B = p_B_minor[izTri[i]].xypart();
ita_theirs = p_ita_parallel_ion_minor[izTri[i]];
nu_theirs = p_nu_ion_minor[izTri[i]];
};
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
} else {
ita_par = ita_theirs;
nu = nu_theirs;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTE BENE qoverMc
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
if (ita_par > 0.0)
{
v4 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
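// Here the edge endpoints are the two triangle centroids, so endpt1 - endpt0 =
// THIRD*(nextpos - prevpos); the THIRD is folded directly into edge_normal
// (the perpendicular (e.y, -e.x) of that edge vector) instead of forming the
// endpoints explicitly as in the kernels above.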
gradvx.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(shared_vie_verts[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (prev_v.viz + shared_vie_verts[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(shared_vie_verts[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (prev_v.viz + shared_vie_verts[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTIONVERTVISC) printf(
"iVertex %d area_quad %1.8E \n"
"our_v.x next prev opp %1.8E %1.8E %1.8E %1.8E gradvx %1.8E %1.8E\n"
"our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E gradvy %1.8E %1.8E\n"
"our_v.z next prev opp %1.8E %1.8E %1.8E %1.8E gradvz %1.8E %1.8E\n"
"info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
iVertex, area_quadrilateral,
shared_vie_verts[threadIdx.x].vxy.x, next_v.vxy.x, prev_v.vxy.x, opp_v.vxy.x,
gradvx.x, gradvx.y,
shared_vie_verts[threadIdx.x].vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
gradvy.x, gradvy.y,
shared_vie_verts[threadIdx.x].viz, next_v.viz, prev_v.viz, opp_v.viz,
gradviz.x, gradviz.y,
info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
htg_diff.x = shared_vie_verts[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie_verts[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie_verts[threadIdx.x].viz - opp_v.viz;
}
if (ita_par > 0.0) {
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_i*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_i*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE)
// {
// ownrates_visc += visc_contrib;
// visc_htg += -THIRD*m_ion*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
// // do not look into frill
// }
// else {
// visc_contrib.x = 0.0; visc_contrib.y = 0.0; visc_contrib.z = 0.0;
// }
//} else
{
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
}
//
// if (TEST)
// printf("iVertex %d tri %d ION ita_par %1.9E \n"
// "gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvz %1.8E %1.8E\n"
// "edgenormal %1.8E %1.8E opp_viz %1.10E our_viz %1.10E\n"
// "ourpos %1.8E %1.8E opp pos %1.8E %1.8E\n"
// "Pi_xx %1.8E xy %1.8E yy %1.8E zx %1.8E\n"
// "visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
// "===\n",
// iVertex, izTri[i], ita_par, gradvx.x, gradvx.y, gradvy.x, gradvy.y,
// gradviz.x, gradviz.y,
// edge_normal.x, edge_normal.y, opp_v.viz, our_v.viz,
// info.pos.x,info.pos.y, opppos.x,opppos.y,
// Pi_xx, Pi_xy, Pi_yy, Pi_zx,
// visc_contrib.x, visc_contrib.y, visc_contrib.z, visc_htg
// );
////
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
// but we can make do with 3x partials
// 2. Now get partials in magnetic coordinates
f64 omegamod;
{
//f64_vec2 edge_normal;
//edge_normal.x = THIRD * (nextpos.y - prevpos.y);
//edge_normal.y = THIRD * (prevpos.x - nextpos.x);
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// We picked edge_normal to be unit_perp.
// Is that at all valid?
// It seems like an arbitrary choice. Since B is in the plane, it's saying we picked perp in the plane, H = z.
// store omegamod instead.
// ita_perp = FACTOR_PERP * ita_par * nu*nu / (omegasq + nu*nu);
// ita_cross = FACTOR_HALL * ita_par * nu*omegamod / (omegasq + nu*nu);
}
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
if (TESTIONVERTVISC)
printf("dvperp_by_db %1.8E W_bP %1.8E \n",
dvperp_by_db, W_bP);
if (TESTIONVERTVISC)
printf("dvHall_by_db %1.8E W_bH %1.8E \n",
dvHall_by_db, W_bH);
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
if (TESTIONVERTVISC)
printf("dvb_by_dperp %1.8E W_bP %1.8E \n",
dvb_by_dperp, W_bP);
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
if (TESTIONVERTVISC)
printf("dvb_by_dHall %1.8E W_bH %1.8E \n",
dvb_by_dHall, W_bH);
}
}
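							// For reference: W is being assembled as the rate-of-strain tensor in the
							// (b, perp, Hall) frame, W_ab = dv_a/dx_b + dv_b/dx_a - (2/3) delta_ab div v,
							// one differentiation direction per block above. E.g. W_bP = dv_perp/db + dv_b/dperp,
							// and W_bb = (4/3) dv_b/db - (2/3)(dv_perp/dperp + dv_Hall/dHall).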
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
if (TESTIONVERTVISC)
printf(" -ita_2 %1.8E W_bP %1.8E contrib %1.8E Pi_P_b %1.8E \n",
-ita_2, W_bP, -ita_2*W_bP, Pi_P_b);
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
if (TESTIONVERTVISC)
printf(" -ita_4 %1.8E W_bH %1.8E contrib %1.8E Pi_P_b %1.8E nu %1.8E omega %1.8E \n",
-ita_4, W_bH, -ita_4*W_bH, Pi_P_b, nu, omegamod);
Pi_H_b += ita_4*W_bP;
}
}
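							// For reference, writing x = omegamod/nu:
							//   ita_1 = ita_par/(1 + x^2)        ita_2 = ita_par/(1 + x^2/4)
							//   ita_3 = ita_par*x/(1 + x^2)      ita_4 = 0.5*ita_par*x/(1 + x^2/4)
							// so ita_1, ita_2 -> ita_par and ita_3, ita_4 -> 0 in the unmagnetised limit,
							// while for omega >> nu they fall off as 1/x^2 and 1/x respectively.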
} // scope W
// All we want left over at this point is Pi .. and unit_b
f64 momflux_b, momflux_perp, momflux_Hall;
{
// Most efficient way: compute mom flux in magnetic coords
f64_vec3 mag_edge;
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_i*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
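						// For reference: with R having columns (unit_b, unit_perp, unit_Hall), the three
						// lines above are visc_contrib = over_m_i * R * (momflux_b, momflux_perp, momflux_Hall),
						// i.e. the edge momentum flux rotated back from magnetic coordinates to x, y, z.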
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
// visc_htg += -TWOTHIRDS*m_ion*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z); // Claim all visc htg for this vertcell
// }
//} else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z); // Claim all visc htg for this vertcell
}
if (TESTIONVERTVISC) {
printf("iVertex %d tri %d ION ita_par %1.9E omega %1.9E %1.9E %1.9E nu %1.9E ourpos %1.8E %1.8E \n"
"unit_b %1.8E %1.8E %1.8E unit_perp %1.8E %1.8E %1.8E unit_Hall %1.8E %1.8E %1.8E\n"
"Pi_b_b %1.8E Pi_P_b %1.8E Pi_P_P %1.8E Pi_H_b %1.8E Pi_H_P %1.8E Pi_H_H %1.8E\n"
"momflux b %1.8E perp %1.8E cross %1.8E visc_contrib %1.9E %1.9E %1.9E \n",
iVertex, izTri[i], ita_par, omega_ci.x, omega_ci.y, omega_ci.z, nu,
info.pos.x, info.pos.y,
unit_b.x, unit_b.y, unit_b.z, unit_perp.x, unit_perp.y, unit_perp.z, unit_Hall.x, unit_Hall.y, unit_Hall.z,
Pi_b_b, Pi_P_b, Pi_P_P, Pi_H_b, Pi_H_P, Pi_H_H,
momflux_b, momflux_perp, momflux_Hall,
visc_contrib.x, visc_contrib.y, visc_contrib.z
);
printf(
"htgdiff %1.10E %1.10E %1.10E htg %1.10E \n===========================\n",
htg_diff.x,
htg_diff.y,
htg_diff.z,
-TWOTHIRDS*m_ion*(htg_diff.dot(visc_contrib))
);
}
//
}
				}; // end if (ita_par > 0.0): a side where ita_par == 0 contributes nothing
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
// Just leaving these but they won't do anything :
//prevpos = opppos;
//prev_v = opp_v;
//opppos = nextpos;
//opp_v = next_v;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_ion[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
// if (TEST)
// printf("%d ion ownrates %1.8E %1.8E %1.8E ownrates_visc %1.8E %1.8E %1.8E our_v %1.8E %1.8E %1.8E\n",
// iVertex, ownrates.x, ownrates.y, ownrates.z, ownrates_visc.x, ownrates_visc.y, ownrates_visc.z, our_v.vxy.x, our_v.vxy.y, our_v.viz);
ownrates += ownrates_visc;
memcpy(p_MAR_ion + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NiTi += visc_htg;
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n",iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
	// Do we need syncthreads? No: nothing in shared memory has been overwritten since the loads,
	// so the triangle pass can read it straight away.
info = p_info_minor[iMinor];
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
{
long izNeighMinor[6];
char szPBC[6];
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
&& (shared_ita_par[threadIdx.x] > 0.0)){
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 omega_ci;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (short i = 0; i < 6; i++)
{
if (TESTIONVISC) printf("start loop %d: ownrates.x %1.9E", i, ownrates_visc.x);
bool bUsableSide = true;
{
f64_vec2 opp_B(0.0, 0.0);
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
// USEFUL:
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
f64 ita_par_opp = p_ita_parallel_ion_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_ion_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
						nu = shared_nu[threadIdx.x]; // deliberately take the nu that goes with whichever ita we chose
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
				omega_ci = 0.5*qoverMc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qoverMc
}
f64_vec2 gradvx, gradvy, gradviz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
v4 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.x = 0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradviz.y = -0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
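				// For reference: the six expressions above are the trapezoid-rule Green's-theorem
				// estimate, grad f ~ (1/A) * contour integral of f n dl, taken around the
				// quadrilateral with corners (ours, next, opp, prev); the x component collects the
				// (y_1 - y_2) factors and the y component the -(x_1 - x_2) factors, with
				// A = area_quadrilateral from the shoelace sum above.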
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
if (prev_v.vxy.x == 0.0) // prev is in the insulator.
{
						// like the above, but with the insulator point dropped the contour degenerates
						// to the triangle (ours, next, opp)
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.y - info.pos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.x - info.pos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.y - info.pos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.x - info.pos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradviz.x = 0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.y - nextpos.y)
+ (opp_v.viz + shared_vie[threadIdx.x].viz)*(opppos.y - info.pos.y)
+ (next_v.viz + opp_v.viz)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.y = -0.5*(
(shared_vie[threadIdx.x].viz + next_v.viz)*(info.pos.x - nextpos.x)
+ (opp_v.viz + shared_vie[threadIdx.x].viz)*(opppos.x - info.pos.x)
+ (next_v.viz + opp_v.viz)*(nextpos.x - opppos.x)
) / area_triangle;
} else {
if (next_v.vxy.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.x = 0.5*(
(prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.y - info.pos.y)
+ (opp_v.viz + prev_v.viz)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].viz + opp_v.viz)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradviz.y = -0.5*(
(prev_v.viz + shared_vie[threadIdx.x].viz)*(prevpos.x - info.pos.x)
+ (opp_v.viz + prev_v.viz)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].viz + opp_v.viz)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
} else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNiegh %d \n\n\n\a", iMinor,
izNeighMinor[i]);
};
};
};
};
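				// For reference: a neighbouring CROSSING_INS cell reporting v == 0 is taken to sit
				// inside the insulator, so the contour collapses from the quadrilateral to a
				// triangle (dropping that point) and the same Green's-theorem gradient is redone
				// over the three remaining corners.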
if (TESTIONVISC) printf("--------------\n%d %d our v: %1.8E %1.8E %1.8E "
"opp v: %1.8E %1.8E %1.8E \n",
iMinor, i, shared_vie[threadIdx.x].vxy.x, shared_vie[threadIdx.x].vxy.y, shared_vie[threadIdx.x].viz,
opp_v.vxy.x, opp_v.vxy.y, opp_v.viz);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_vie[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie[threadIdx.x].viz - opp_v.viz;
} else {
if (TESTIONVISC) printf("side not usable: %d", i);
};
if (bUsableSide) {
if ((VISCMAG == 0) || (omega_ci.dot(omega_ci) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradviz.x);
Pi_zy = -ita_par*(gradviz.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_i*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_i*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_i*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
//// if (info.flag == CROSSING_INS) {
//// char flag = p_info_minor[izNeighMinor[i]].flag;
//// if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX)) {
//// ownrates_visc += visc_contrib;
////
//// if (TESTIONVISC) printf("UNMAGNETIZED visc_contrib.x %1.9E ownrates %1.9E\n",
//// visc_contrib.x, ownrates_visc.x);
////
//// if (i % 2 == 0) {
//// // vertex : heat collected by vertex
//// }
//// else {
//// visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
////// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
//// // + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
//// // + (our_v.viz - opp_v.viz)*visc_contrib.z);
//// // And we are going to give it to what? Just spread it out after.
////
//// }
//// }
//// else {
//// // DO NOTHING
//// }
//// } else {
ownrates_visc += visc_contrib;
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.viz - opp_v.viz)*visc_contrib.z);
}
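					// For reference: around a triangle the even i apparently index vertex neighbours
					// and the odd i triangle neighbours, so viscous heating is only booked on the
					// triangle-triangle edges here; the vertex share has to be rounded up into the
					// vertex heat afterwards, as noted near p_NT_addition_tri below.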
//if (TESTTRI) {
// printf("iMinor %d %d "
// " ita_par %1.11E nu %1.11E omega %1.9E %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E our vx %1.9E theirs %1.9E\n"
// "gradvy %1.9E %1.9E our vy %1.9E theirs %1.9E\n"
// "gradvz %1.9E %1.9E our vz %1.9E theirs %1.9E\n"
// "visc contrib %1.10E %1.10E %1.10E\n"
// "visc htg %1.10E %1.10E %1.10E | running %1.10E \n"
// " *************************************** \n",
// iMinor, izNeighMinor[i],
// ita_par, // Think nu is what breaks it
// nu, omega_ci.x, omega_ci.y, omega_ci.z,
// gradvx.x, gradvx.y, our_v.vxy.x, opp_v.vxy.x,
// gradvy.x, gradvy.y, our_v.vxy.y, opp_v.vxy.y,
// gradviz.x, gradviz.y, our_v.viz, opp_v.viz,
// visc_contrib.x, visc_contrib.y, visc_contrib.z,
// -THIRD*m_ion*(our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x,
// -THIRD*m_ion*(our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y,
// -THIRD*m_ion*(our_v.viz - opp_v.viz)*visc_contrib.z,
// visc_htg
// );
// printf("iMinor %d visc_contrib.z %1.10E our-opp %1.10E z htg %1.10E | running %1.10E \n"
// " *************************************** \n",
// iMinor, visc_contrib.z, our_v.viz - opp_v.viz,
// -(our_v.viz - opp_v.viz)*THIRD*m_ion*visc_contrib.z,
// visc_htg);
// }
// So we are saying if edge_normal.x > 0 and gradviz.x > 0
// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64 omegasq = omega_ci.dot(omega_ci);
omegamod = sqrt(omegasq);
unit_b = omega_ci / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradviz);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradviz);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradviz);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
//if (TESTTRI)
// printf("iMinor %d %d edge_normal %1.10E %1.10E mag_edge (b,P,H) %1.10E %1.10E %1.10E\n"
// "Pi_b_b %1.10E Pi_b_P %1.10E Pi_b_H %1.10E \n"
// "Pi_P_b %1.10E Pi_P_P %1.10E Pi_P_H %1.10E \n"
// "Pi_H_b %1.10E Pi_H_P %1.10E Pi_H_H %1.10E \n",
// iMinor, izNeighMinor[i], edge_normal.x, edge_normal.y, mag_edge.x, mag_edge.y, mag_edge.z,// b,P,H
// Pi_b_b, Pi_P_b, Pi_H_b,
// Pi_P_b, Pi_P_P, Pi_H_P,
// Pi_H_b, Pi_H_P, Pi_H_H);
}
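					// For reference: only six Pi components are stored because Pi is treated as
					// symmetric (Pi_b_P = Pi_P_b etc.), which is why Pi_P_b and Pi_H_b reappear in
					// the momflux_perp and momflux_Hall rows above.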
// Time to double-check carefully the signs.
// Pi was defined with - on dv/dx and we then dot that with the edge_normal, so giving + if we are higher than outside.
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_i*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_i*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_i*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
if (TESTIONVISC)
printf("%d %d over_m_i %1.9E "
// "unit_b %1.9E %1.9E %1.9E \n"
// "unit_perp %1.9E %1.9E %1.9E \n"
// "unit_Hall %1.9E %1.9E %1.9E \n"
// "momflux_b %1.9E momflux_perp %1.9E momflux_Hall %1.9E\n"
"ita_par %1.10E visc_contrib.x %1.10E \n",
iMinor, izNeighMinor[i], over_m_i,
// unit_b.x, unit_b.y, unit_b.z,
// unit_perp.x, unit_perp.y, unit_perp.z,
// unit_Hall.x, unit_Hall.y, unit_Hall.z,
// momflux_b, momflux_perp, momflux_Hall,
ita_par, visc_contrib.x );
ownrates_visc += visc_contrib;
if (i % 2 != 0) // not vertex
{
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
if (TESTIONVISC)
printf("%d %d visc_htg %1.10E\n", iMinor,i, -THIRD*m_ion*(htg_diff.dot(visc_contrib)));
}
}
}; // bUsableSide
// endpt0 = endpt1;
// prevpos = opppos;
// prev_v = opp_v;
// opppos = nextpos;
// opp_v = next_v;
};
f64_vec3 ownrates;
memcpy(&ownrates,&(p_MAR_ion[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_ion[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NiTi += visc_htg;
	// Barking mad: we never made special allowance yet for the case where a prev point is in the insulator.
//___________________________________________________________________________________________
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
} else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
__syncthreads();
// Now do electron: overwrite ita and nu, copy-paste the above codes very carefully
shared_ita_par[threadIdx.x] = p_ita_parallel_elec_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_elec_minor[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) // keeping consistent with ion above where we did put OUTERMOST here
{// but we set ita to 0 in the pre routine for outermost.
shared_ita_par_verts[threadIdx.x] = p_ita_parallel_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_elec_minor[iVertex + BEGINNING_OF_CENTRAL];
}
else {
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
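	// For reference: the __syncthreads() above the electron loads makes sure every thread
	// has finished reading the ion ita/nu out of shared memory before those arrays are
	// overwritten, and this one makes the electron values visible to all threads before
	// the electron pass starts reading its neighbours' entries.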
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len; // ?!
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
if ((info.flag == DOMAIN_VERTEX) && (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
#pragma unroll
for (int i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
f64_vec3 omega_ce;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_B = shared_B[izTri[i] - StartMinor];
opp_ita = shared_ita_par[izTri[i] - StartMinor];
opp_nu = shared_nu[izTri[i] - StartMinor];
//ita_par = 0.5*(shared_ita_par_verts[threadIdx.x] + shared_ita_par[izTri[i] - StartMinor]);
//nu = 0.5*(shared_nu_verts[threadIdx.x] + shared_nu[izTri[i] - StartMinor]);
}
else {
opp_B = p_B_minor[izTri[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izTri[i]];
opp_nu = p_nu_elec_minor[izTri[i]];
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par_verts[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
				omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B_verts[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qovermc for electrons
}
f64_vec2 gradvx, gradvy, gradvez;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (ita_par > 0.0)
{
v4 prev_v, next_v, opp_v;
f64_vec2 prevpos, nextpos, opppos;
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_vie[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_vie_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_vie[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_vie_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_vie[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_vie_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
// All same as ion here:
// Order of calculations may help things to go out/into scope at the right times so careful with that.
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie_verts[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie_verts[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie_verts[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(shared_vie_verts[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (prev_v.vez + shared_vie_verts[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(shared_vie_verts[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (prev_v.vez + shared_vie_verts[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTVISC) printf("%d our v %1.8E %1.8E %1.8E oppv %1.8E %1.8E %1.8E \n",
izTri[i],
shared_vie_verts[threadIdx.x].vxy.x, shared_vie_verts[threadIdx.x].vxy.y,
shared_vie_verts[threadIdx.x].vez, opp_v.vxy.x, opp_v.vxy.y, opp_v.vez);
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_vie_verts[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie_verts[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie_verts[threadIdx.x].vez - opp_v.vez;
}
// f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
if (ita_par > 0.0) {
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.01*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
					// Let's suppose Pi_yx means the rate of flow of y-momentum in the x direction.
// Thus when we want to know how much y momentum is flowing through the wall we take
// Pi_yx.edge_x + Pi_yy.edge_y -- reasonable.
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_e*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_e*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_e*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
// if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
//
// visc_htg += -TWOTHIRDS*m_e*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
// }
// else {
// visc_contrib.x = 0.0; visc_contrib.y = 0.0; visc_contrib.z = 0.0;
// }
// } else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
};
// The alternative, that may or may not run faster, is to test for ita == 0 before we do all the calcs
// and then set ita == 0 in all the places not to look, including OUTERMOST, and do not do traffic to or from it.
//
if (0) // (TESTVISC)
printf("iVertex %d tri %d ELEC ita_par %1.9E own ita %1.9E\n"
"gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvez %1.8E %1.8E\n"
"edgenormal %1.8E %1.8E\n"
"Pi_xx %1.8E xy %1.8E yy %1.8E zx %1.8E\n"
"visc_contrib %1.9E %1.9E %1.9E \n"
"htg cum %1.9E heating %1.9E \n"
"===\n",
iVertex, izTri[i], ita_par, shared_ita_par_verts[threadIdx.x],
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvez.x, gradvez.y,
edge_normal.x, edge_normal.y,
Pi_xx, Pi_xy, Pi_yy, Pi_zx,
visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg,
-TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib))
);
// -= !!!
					// So we are saying if edge_normal.x > 0 and gradvez.x > 0
					// then Pi_zx < 0 then ownrates += a positive amount. That is correct.
}
else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y; // b component
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y; // P component
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y; // H component
// verify for chosen edge that we obtained a 3-vector of the same length as the original edge!
// Tick
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
}
f64_vec3 visc_contrib;
visc_contrib.x = over_m_e*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_e*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_e*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
//if (info.flag == OUTERMOST) {
// if (p_info_minor[izTri[i]].flag == DOMAIN_TRIANGLE) {
// ownrates_visc += visc_contrib;
// visc_htg += -TWOTHIRDS*m_e*(
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
// }
//}else
{
ownrates_visc += visc_contrib;
visc_htg += -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib));
// (our_v.vxy.x - opp_v.vxy.x)*visc_contrib.x
// + (our_v.vxy.y - opp_v.vxy.y)*visc_contrib.y
// + (our_v.vez - opp_v.vez)*visc_contrib.z);
};
if (TESTVISC) {
// Most efficient way: compute mom flux in magnetic coords
printf("iVertex %d MAGNETIZED elec: visc contrib %1.8E %1.8E %1.8E\n"
"htg cum %1.9E visc htg %1.9E ita_par %1.9E \n"
"gradvx %1.8E %1.8E gradvy %1.8E %1.8E gradvez %1.8E %1.8E\n"
"unit_b %1.8E %1.8E %1.8E unit_perp %1.8E %1.8E %1.8E unit_H %1.8E %1.8E %1.8E\n"
"omega_ce %1.8E %1.8E %1.8E mod %1.8E nu %1.8E \n"
"===\n",
iVertex, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg, -TWOTHIRDS*m_e*(htg_diff.dot(visc_contrib)),
ita_par,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvez.x, gradvez.y,
unit_b.x, unit_b.y, unit_b.z, unit_perp.x, unit_perp.y, unit_perp.z, unit_Hall.x, unit_Hall.y, unit_Hall.z,
omega_ce.x, omega_ce.y, omega_ce.z, omega_ce.modulus(), nu);
}
//
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}
}; // ita_par > 0.0
// endpt0 = endpt1;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_elec[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
//
if (TESTVISC)
printf("iVertex %d ownrates %1.8E %1.8E %1.8E ownrates_visc %1.8E %1.8E %1.8E htg %1.8E \n",
iVertex, ownrates.x, ownrates.y, ownrates.z, ownrates_visc.x, ownrates_visc.y, ownrates_visc.z, visc_htg);
ownrates += ownrates_visc;
memcpy(p_MAR_elec + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
if (TESTVISC) printf("iVertex %d NeTe recorded %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NeTe);
p_NT_addition_rate[iVertex].NeTe += visc_htg;
if (TESTVISC) printf("iVertex %d NeTe recorded %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NeTe);
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex e %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex e %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex e %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex e %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// Electrons in tris:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
}
else {
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
// f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec3 omega_ce;
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
bool bUsableSide = true;
{
f64_vec2 opp_B;
f64 opp_ita, opp_nu;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opp_B = shared_B[izNeighMinor[i] - StartMinor];
opp_ita = shared_ita_par[izNeighMinor[i] - StartMinor];
opp_nu = shared_nu[izNeighMinor[i] - StartMinor];
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opp_B = shared_B_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_ita = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
opp_nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
opp_B = p_B_minor[izNeighMinor[i]].xypart();
opp_ita = p_ita_parallel_elec_minor[izNeighMinor[i]];
opp_nu = p_nu_elec_minor[izNeighMinor[i]];
if (opp_ita == 0.0) bUsableSide = false;
}
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opp_B = Clockwise_d*opp_B;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opp_B = Anticlockwise_d*opp_B;
}
if (shared_ita_par[threadIdx.x] < opp_ita) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = opp_ita;
nu = opp_nu;
}
				omega_ce = 0.5*qovermc*(Make3(opp_B + shared_B[threadIdx.x], BZ_CONSTANT)); // NOTA BENE qovermc for electrons
}
f64_vec2 gradvez, gradvx, gradvy;
			f64_vec2 edge_normal; // one reason why storing the position beats re-loading it.
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
v4 opp_v, next_v, prev_v;
f64_vec2 opppos, nextpos, prevpos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_vie[izNeighMinor[iprev] - StartMinor]), sizeof(v4));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_vie_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_vie_minor[izNeighMinor[iprev]]), sizeof(v4));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v.vxy = Clockwise_d*prev_v.vxy;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v.vxy = Anticlockwise_d*prev_v.vxy;
}
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_vie[izNeighMinor[i] - StartMinor]), sizeof(v4));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_vie_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_vie_minor[izNeighMinor[i]]), sizeof(v4));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v.vxy = Clockwise_d*opp_v.vxy;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v.vxy = Anticlockwise_d*opp_v.vxy;
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_vie[izNeighMinor[inext] - StartMinor]), sizeof(v4));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_vie_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(v4));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_vie_minor[izNeighMinor[inext]]), sizeof(v4));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v.vxy = Clockwise_d*next_v.vxy;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v.vxy = Anticlockwise_d*next_v.vxy;
}
// New definition of endpoint of minor edge:
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.x = 0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvez.y = -0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
if (prev_v.vxy.x == 0.0) // prev is in the insulator.
{
						// like the above, but with the insulator point dropped the contour degenerates
						// to the triangle (ours, next, opp)
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.y - info.pos.y)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_vie[threadIdx.x].vxy.x + next_v.vxy.x)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(opppos.x - info.pos.x)
+ (next_v.vxy.x + opp_v.vxy.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.y - nextpos.y)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.y - info.pos.y)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_vie[threadIdx.x].vxy.y + next_v.vxy.y)*(info.pos.x - nextpos.x)
+ (opp_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(opppos.x - info.pos.x)
+ (next_v.vxy.y + opp_v.vxy.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradvez.x = 0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.y - nextpos.y)
+ (opp_v.vez + shared_vie[threadIdx.x].vez)*(opppos.y - info.pos.y)
+ (next_v.vez + opp_v.vez)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.y = -0.5*(
(shared_vie[threadIdx.x].vez + next_v.vez)*(info.pos.x - nextpos.x)
+ (opp_v.vez + shared_vie[threadIdx.x].vez)*(opppos.x - info.pos.x)
+ (next_v.vez + opp_v.vez)*(nextpos.x - opppos.x)
) / area_triangle;
}
else {
if (next_v.vxy.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.vxy.x + shared_vie[threadIdx.x].vxy.x)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.x + prev_v.vxy.x)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.x + opp_v.vxy.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.y - info.pos.y)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.vxy.y + shared_vie[threadIdx.x].vxy.y)*(prevpos.x - info.pos.x)
+ (opp_v.vxy.y + prev_v.vxy.y)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vxy.y + opp_v.vxy.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.x = 0.5*(
(prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.y - info.pos.y)
+ (opp_v.vez + prev_v.vez)*(opppos.y - prevpos.y)
+ (shared_vie[threadIdx.x].vez + opp_v.vez)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvez.y = -0.5*(
(prev_v.vez + shared_vie[threadIdx.x].vez)*(prevpos.x - info.pos.x)
+ (opp_v.vez + prev_v.vez)*(opppos.x - prevpos.x)
+ (shared_vie[threadIdx.x].vez + opp_v.vez)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
} else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNeigh %d \n\n\n\a", iMinor,
izNeighMinor[i]);
};
};
};
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
if (TEST_ELEC_VISC_TRI) printf("%d prev_v %1.14E opp_v %1.14E next_v %1.14E our_v %1.14E omega %1.8E %1.8E\n",
iMinor, prev_v.vez, opp_v.vez, next_v.vez, shared_vie[threadIdx.x].vez,
omega_ce.x, omega_ce.y);
htg_diff.x = shared_vie[threadIdx.x].vxy.x - opp_v.vxy.x;
htg_diff.y = shared_vie[threadIdx.x].vxy.y - opp_v.vxy.y;
htg_diff.z = shared_vie[threadIdx.x].vez - opp_v.vez;
}
// Wouldn't it be nice if we could now drop all our prev_v variables and pick them up again on the next
// go around?
// That's really what I think we need.
if (bUsableSide)
{
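				// Note: the threshold here is 0.1*0.1*nu*nu whereas the equivalent tests for the
				// ion vertex, ion triangle and electron vertex cases above use 0.01*0.1*nu*nu;
				// worth checking whether that difference is intentional, since it only decides
				// which sides take the unmagnetised shortcut.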
if ((VISCMAG == 0) || (omega_ce.dot(omega_ce) < 0.1*0.1*nu*nu))
{
// run unmagnetised case
f64 Pi_xx, Pi_xy, Pi_yx, Pi_yy, Pi_zx, Pi_zy;
Pi_xx = -ita_par*THIRD*(4.0*gradvx.x - 2.0*gradvy.y);
Pi_xy = -ita_par*(gradvx.y + gradvy.x);
Pi_yx = Pi_xy;
Pi_yy = -ita_par*THIRD*(4.0*gradvy.y - 2.0*gradvx.x);
Pi_zx = -ita_par*(gradvez.x);
Pi_zy = -ita_par*(gradvez.y);
f64_vec3 visc_contrib;
visc_contrib.x = -over_m_e*(Pi_xx*edge_normal.x + Pi_xy*edge_normal.y);
visc_contrib.y = -over_m_e*(Pi_yx*edge_normal.x + Pi_yy*edge_normal.y);
visc_contrib.z = -over_m_e*(Pi_zx*edge_normal.x + Pi_zy*edge_normal.y);
ownrates_visc += visc_contrib;
if (i % 2 != 0)
visc_htg += -THIRD*m_e*(htg_diff.dot(visc_contrib));
if (TESTVISC)
printf("\n%d : %d : ita %1.8E gradvz %1.9E %1.9E ourpos %1.9E %1.9E visc_contrib.z %1.10E visc_htg %1.10E\n",
iMinor, izNeighMinor[i], ita_par,
gradvez.x,gradvez.y, info.pos.x,info.pos.y,
visc_contrib.z, visc_htg);
// 42939: Find out why it makes too much heat. Probably a compound error.
// if (iMinor == 42939) printf("42939\nour_v %1.8E %1.8E %1.8E \n"
// "opp_v %1.8E %1.8E %1.8E \n"
// "visc_contrib %1.8E %1.8E %1.8E \n",
// our_v.vxy.x, our_v.vxy.y, our_v.vez,
// opp_v.vxy.x, opp_v.vxy.y, opp_v.vez,
// visc_contrib.x, visc_contrib.y, visc_contrib.z);
//
} else {
f64_vec3 unit_b, unit_perp, unit_Hall;
f64 omegamod;
{
// f64_vec2 edge_normal;
// edge_normal.x = THIRD * (nextpos.y - prevpos.y);
// edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
f64 omegasq = omega_ce.dot(omega_ce);
omegamod = sqrt(omegasq);
unit_b = omega_ce / omegamod;
unit_perp = Make3(edge_normal, 0.0) - unit_b*(unit_b.dotxy(edge_normal));
unit_perp = unit_perp / unit_perp.modulus();
unit_Hall = unit_b.cross(unit_perp); // Note sign.
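// (unit_b, unit_perp, unit_Hall) is then an orthonormal right-handed frame:
// b along omega_ce, perp = the part of the edge normal orthogonal to b (normalised),
// and Hall = b x perp completing the set.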
// store omegamod instead.
}
f64 Pi_b_b = 0.0, Pi_P_b = 0.0, Pi_P_P = 0.0, Pi_H_b = 0.0, Pi_H_P = 0.0, Pi_H_H = 0.0;
{
f64 W_bb = 0.0, W_bP = 0.0, W_bH = 0.0, W_PP = 0.0, W_PH = 0.0, W_HH = 0.0; // these have to be alive at same time as 9 x partials
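// W should be the traceless rate-of-strain tensor expressed in the (b, perp, Hall) frame:
// W_ab = dv_a/dx_b + dv_b/dx_a - (2/3) delta_ab div v.
// The three sub-blocks below accumulate it from directional derivatives: intermed holds the
// derivative of (vx, vy, vez) along b / perp / Hall, then gets dotted back with the unit vectors.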
{
f64_vec3 intermed;
// use: d vb / da = b transpose [ dvi/dxj ] a
// Prototypical element: a.x b.y dvy/dx
// b.x a.y dvx/dy
intermed.x = unit_b.dotxy(gradvx);
intermed.y = unit_b.dotxy(gradvy);
intermed.z = unit_b.dotxy(gradvez);
{
f64 dvb_by_db, dvperp_by_db, dvHall_by_db;
dvb_by_db = unit_b.dot(intermed);
dvperp_by_db = unit_perp.dot(intermed);
dvHall_by_db = unit_Hall.dot(intermed);
W_bb += 4.0*THIRD*dvb_by_db;
W_bP += dvperp_by_db;
W_bH += dvHall_by_db;
W_PP -= 2.0*THIRD*dvb_by_db;
W_HH -= 2.0*THIRD*dvb_by_db;
}
{
f64 dvb_by_dperp, dvperp_by_dperp,
dvHall_by_dperp;
// Optimize by getting rid of different labels.
intermed.x = unit_perp.dotxy(gradvx);
intermed.y = unit_perp.dotxy(gradvy);
intermed.z = unit_perp.dotxy(gradvez);
dvb_by_dperp = unit_b.dot(intermed);
dvperp_by_dperp = unit_perp.dot(intermed);
dvHall_by_dperp = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvperp_by_dperp;
W_PP += 4.0*THIRD*dvperp_by_dperp;
W_HH -= 2.0*THIRD*dvperp_by_dperp;
W_bP += dvb_by_dperp;
W_PH += dvHall_by_dperp;
}
{
f64 dvb_by_dHall, dvperp_by_dHall, dvHall_by_dHall;
intermed.x = unit_Hall.dotxy(gradvx);
intermed.y = unit_Hall.dotxy(gradvy);
intermed.z = unit_Hall.dotxy(gradvez);
dvb_by_dHall = unit_b.dot(intermed);
dvperp_by_dHall = unit_perp.dot(intermed);
dvHall_by_dHall = unit_Hall.dot(intermed);
W_bb -= 2.0*THIRD*dvHall_by_dHall;
W_PP -= 2.0*THIRD*dvHall_by_dHall;
W_HH += 4.0*THIRD*dvHall_by_dHall;
W_bH += dvb_by_dHall;
W_PH += dvperp_by_dHall;
}
}
{
{
f64 ita_1 = ita_par*(nu*nu / (nu*nu + omegamod*omegamod));
Pi_b_b += -ita_par*W_bb;
Pi_P_P += -0.5*(ita_par + ita_1)*W_PP - 0.5*(ita_par - ita_1)*W_HH;
Pi_H_H += -0.5*(ita_par + ita_1)*W_HH - 0.5*(ita_par - ita_1)*W_PP;
Pi_H_P += -ita_1*W_PH;
}
{
f64 ita_2 = ita_par*(nu*nu / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_2*W_bP;
Pi_H_b += -ita_2*W_bH;
}
{
f64 ita_3 = ita_par*(nu*omegamod / (nu*nu + omegamod*omegamod));
Pi_P_P -= ita_3*W_PH;
Pi_H_H += ita_3*W_PH;
Pi_H_P += 0.5*ita_3*(W_PP - W_HH);
}
{
f64 ita_4 = 0.5*ita_par*(nu*omegamod / (nu*nu + 0.25*omegamod*omegamod));
Pi_P_b += -ita_4*W_bH;
Pi_H_b += ita_4*W_bP;
}
}
}
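// Pi_b_b ... Pi_H_H now hold the viscous stress in the (b, perp, Hall) frame. The coefficients
// ita_1..ita_4 above play the role of the magnetised (Braginskii-type) viscosities: ita_1 and ita_2
// tend to ita_par as omega/nu -> 0, recovering the unmagnetised form, while ita_3 and ita_4 are the
// gyroviscous terms and vanish in that limit.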
f64 momflux_b, momflux_perp, momflux_Hall;
{
f64_vec3 mag_edge;
// f64_vec2 edge_normal;
// edge_normal.x = THIRD * (nextpos.y - prevpos.y);
// edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
// Most efficient way: compute mom flux in magnetic coords
mag_edge.x = unit_b.x*edge_normal.x + unit_b.y*edge_normal.y;
mag_edge.y = unit_perp.x*edge_normal.x + unit_perp.y*edge_normal.y;
mag_edge.z = unit_Hall.x*edge_normal.x + unit_Hall.y*edge_normal.y;
momflux_b = -(Pi_b_b*mag_edge.x + Pi_P_b*mag_edge.y + Pi_H_b*mag_edge.z);
momflux_perp = -(Pi_P_b*mag_edge.x + Pi_P_P*mag_edge.y + Pi_H_P*mag_edge.z);
momflux_Hall = -(Pi_H_b*mag_edge.x + Pi_H_P*mag_edge.y + Pi_H_H*mag_edge.z);
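// i.e. momflux_a = -(Pi . mag_edge)_a : the viscous momentum flux through this edge expressed in the
// (b, perp, Hall) frame; mag_edge holds the components of the (purely x-y) edge normal in that frame.
// It gets rotated back to x, y, z just below.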
}
// unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall
// is the flow of p_x dotted with the edge_normal
// ownrates will be divided by N to give dv/dt
// m N dvx/dt = integral div momflux_x
// Therefore divide here just by m
f64_vec3 visc_contrib;
visc_contrib.x = over_m_e*(unit_b.x*momflux_b + unit_perp.x*momflux_perp + unit_Hall.x*momflux_Hall);
visc_contrib.y = over_m_e*(unit_b.y*momflux_b + unit_perp.y*momflux_perp + unit_Hall.y*momflux_Hall);
visc_contrib.z = over_m_e*(unit_b.z*momflux_b + unit_perp.z*momflux_perp + unit_Hall.z*momflux_Hall);
ownrates_visc += visc_contrib;
if (TEST_ELEC_VISC_TRI) printf(
"%d ownrates_visc.z %1.14E visc_contrib.z %1.14E 1/m_e %1.14E\n"
"unit_b.z %1.14E unit_perp.z %1.14E unit_Hall.z %1.14E\n"
"momflux b perp Hall %1.14E %1.14E %1.14E gradvez %1.14E %1.14E\n",
iMinor, ownrates_visc.z, visc_contrib.z, over_m_e, unit_b.z,
unit_perp.z, unit_Hall.z, momflux_b, momflux_perp, momflux_Hall,
gradvez.x, gradvez.y);
if (i % 2 != 0)
visc_htg += -THIRD*m_e*(htg_diff.dot(visc_contrib));
}
}; // bUsableSide
// endpt0 = endpt1;
};
f64_vec3 ownrates;
memcpy(&(ownrates), &(p_MAR_elec[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_elec[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NeTe += visc_htg;
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor e %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor e %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor e %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor e %d NAN VISC HTG\n", iMinor);
#endif
if (TESTVISC) {
if (ownrates.x != ownrates.x)
printf("iMinor e %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor e %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor e %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor e %d NAN VISC HTG\n", iMinor);
}
} else {
// Not domain, not crossing_ins, not a frill
} // non-domain tri
}; // was it FRILL
}
// Neutral routine:
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: in each loop we want to let omega and nu drop out of scope as soon as we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.5 :
if ((info.flag == DOMAIN_VERTEX) && (info.pos.modulus() < 4.5)
&& (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
{
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
} // Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
if (ita_par > 0.0) // note it was the minimum taken.
{
f64_vec3 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
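// area_quadrilateral is (I believe) the shoelace area of the quadrilateral through ourpos, prevpos,
// opppos, nextpos, and the gradients below are the Green's-theorem estimate
// grad v ~ (1/A) * sum over edges of 0.5*(v_i + v_j)*(dy, -dx), written out term by term;
// the same vertex ordering is used for the area, so the orientation signs are consistent.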
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
gradvx.x = 0.5*(
(shared_v_n_verts[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + shared_v_n_verts[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_v_n_verts[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + shared_v_n_verts[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_v_n_verts[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + shared_v_n_verts[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_v_n_verts[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + shared_v_n_verts[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
//
// if (TEST) printf(
// "iVertex %d our_v.y next prev opp %1.8E %1.8E %1.8E %1.8E\n"
// "area_quad %1.8E \n"
// "info.pos %1.8E %1.8E opppos %1.8E %1.8E prev %1.8E %1.8E next %1.8E %1.8E\n",
// iVertex, our_v.vxy.y, next_v.vxy.y, prev_v.vxy.y, opp_v.vxy.y,
// area_quadrilateral,
// info.pos.x, info.pos.y, opppos.x, opppos.y, prevpos.x, prevpos.y, nextpos.x, nextpos.y);
//
gradvz.x = 0.5*(
(shared_v_n_verts[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + shared_v_n_verts[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.y = -0.5*(
(shared_v_n_verts[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + shared_v_n_verts[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"gradvx %1.9E %1.9E gradvy %1.9E %1.9E gradvz %1.9E %1.9E \n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E edge_nor %1.9E %1.9E\n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvz.x, gradvz.y,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
edge_normal.x, edge_normal.y);
}
// Order of calculations may help things to go out/into scope at the right times so careful with that.
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
if (ita_par > 0.0)
{
// For neutral fluid viscosity does not involve dimensional transfers.
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
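// Same bookkeeping as the charged-species routines: MAR is divided by N later to give dv/dt,
// so the edge momentum flux ita_par * grad v . edge_normal is divided only by m_n here.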
// if (iVertex == VERTCHOSEN) {
// printf("visc_contrib %1.9E %1.9E %1.9E ita %1.10E \n",
// visc_contrib.x, visc_contrib.y, visc_contrib.z, ita_par);
// }
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
}
else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) &&
(info.pos.modulus() < 4.9) && (shared_ita_par[threadIdx.x] > 0.0)) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (short i = 0; i < 6; i++)
{
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
}
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
f64_vec2 gradvx, gradvy, gradvz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
f64_vec3 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
f64 area_quadrilateral = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y)
);
gradvx.x = 0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvx.y = -0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.x = 0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvy.y = -0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.x = 0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_quadrilateral;
gradvz.y = -0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_quadrilateral;
if (TESTNEUTVISC2) {
printf("%d i %d prev_v %1.10E our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
gradvx.x = 0.0;
gradvx.y = 0.0;
gradvy.x = 0.0;
gradvy.y = 0.0;
gradvz.x = 0.0;
gradvz.y = 0.0;
bUsableSide = false;
/*
if (prev_v.x == 0.0) // prev is in the insulator. ---- this seems like a dodgy way of trying to know this.
{
// do like the above but it goes (ours, next, opp) somehow?
f64 area_triangle = 0.5*(
(info.pos.x + nextpos.x)*(info.pos.y - nextpos.y)
+ (opppos.x + info.pos.x)*(opppos.y - info.pos.y)
+ (nextpos.x + opppos.x)*(nextpos.y - opppos.y));
gradvx.x = 0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.y - nextpos.y)
+ (opp_v.x + shared_v_n[threadIdx.x].x)*(opppos.y - info.pos.y)
+ (next_v.x + opp_v.x)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(shared_v_n[threadIdx.x].x + next_v.x)*(info.pos.x - nextpos.x)
+ (opp_v.x + shared_v_n[threadIdx.x].x)*(opppos.x - info.pos.x)
+ (next_v.x + opp_v.x)*(nextpos.x - opppos.x)
) / area_triangle;
gradvy.x = 0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.y - nextpos.y)
+ (opp_v.y + shared_v_n[threadIdx.x].y)*(opppos.y - info.pos.y)
+ (next_v.y + opp_v.y)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(shared_v_n[threadIdx.x].y + next_v.y)*(info.pos.x - nextpos.x)
+ (opp_v.y + shared_v_n[threadIdx.x].y)*(opppos.x - info.pos.x)
+ (next_v.y + opp_v.y)*(nextpos.x - opppos.x)
) / area_triangle;
gradvz.x = 0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.y - nextpos.y)
+ (opp_v.z + shared_v_n[threadIdx.x].z)*(opppos.y - info.pos.y)
+ (next_v.z + opp_v.z)*(nextpos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvz.y = -0.5*(
(shared_v_n[threadIdx.x].z + next_v.z)*(info.pos.x - nextpos.x)
+ (opp_v.z + shared_v_n[threadIdx.x].z)*(opppos.x - info.pos.x)
+ (next_v.z + opp_v.z)*(nextpos.x - opppos.x)
) / area_triangle;
if (TESTNEUTVISC2) {
printf("%d i %d PREVV=0 our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
}
else {
if (next_v.x == 0.0) // next is in the insulator
{
f64 area_triangle = 0.5*(
(prevpos.x + info.pos.x)*(prevpos.y - info.pos.y)
+ (opppos.x + prevpos.x)*(opppos.y - prevpos.y)
+ (info.pos.x + opppos.x)*(info.pos.y - opppos.y)
);
gradvx.x = 0.5*(
(prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.y - info.pos.y)
+ (opp_v.x + prev_v.x)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].x + opp_v.x)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvx.y = -0.5*(
(prev_v.x + shared_v_n[threadIdx.x].x)*(prevpos.x - info.pos.x)
+ (opp_v.x + prev_v.x)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].x + opp_v.x)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.x = 0.5*(
(prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.y - info.pos.y)
+ (opp_v.y + prev_v.y)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].y + opp_v.y)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvy.y = -0.5*(
(prev_v.y + shared_v_n[threadIdx.x].y)*(prevpos.x - info.pos.x)
+ (opp_v.y + prev_v.y)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].y + opp_v.y)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
if (TESTNEUTVISC2) {
printf("%d i %d NEXTV=0 our_v %1.10E opp_v %1.10E prev_v %1.10E\n",
iMinor, i, shared_v_n[threadIdx.x].y, opp_v.y, prev_v.y);
};
gradvz.x = 0.5*(
(prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.y - info.pos.y)
+ (opp_v.z + prev_v.z)*(opppos.y - prevpos.y)
+ (shared_v_n[threadIdx.x].z + opp_v.z)*(info.pos.y - opppos.y) // nextpos = pos_anti, assumed
) / area_triangle;
gradvz.y = -0.5*(
(prev_v.z + shared_v_n[threadIdx.x].z)*(prevpos.x - info.pos.x)
+ (opp_v.z + prev_v.z)*(opppos.x - prevpos.x)
+ (shared_v_n[threadIdx.x].z + opp_v.z)*(info.pos.x - opppos.x) // nextpos = pos_anti, assumed
) / area_triangle;
}
else {
printf("\n\n\nDid not make sense! Alert RING-TAILED LEMUR. iMinor %d iNiegh %d \n"
"izNeighMinor[inext] %d izNeighMinor[iprev] %d flag %d %d \n"
"prev_v.x %1.8E next_v.x %1.8E \n"
"\n\n\a", iMinor,
izNeighMinor[i],
izNeighMinor[inext], izNeighMinor[iprev], p_info_minor[izNeighMinor[inext]].flag,
p_info_minor[izNeighMinor[iprev]].flag, prev_v.x, next_v.x);
};
};
*/
};
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
htg_diff.x = shared_v_n[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n[threadIdx.x].z - opp_v.z;
// if (iMinor == CHOSEN) printf("============================\nNeutral viscosity %d %d\n"
// "v.x %1.9E opp_v.x %1.9E prev_v.x %1.9E next_v.x %1.9E\n"
// "ourpos %1.9E %1.9E \n"
// "prevpos %1.9E %1.9E \n"
// "opppos %1.9E %1.9E \n"
// "nextpos %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E gradvy %1.9E %1.9E edge_nor %1.9E %1.9E\n",
// iMinor, izNeighMinor[i],
// shared_v_n[threadIdx.x].x, opp_v.x, prev_v.x, next_v.x,
// info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
// gradvx.x, gradvx.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y);
//
};
if (bUsableSide) {
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*ita_par*gradvx.dot(edge_normal);
visc_contrib.y = over_m_n*ita_par*gradvy.dot(edge_normal);
visc_contrib.z = over_m_n*ita_par*gradvz.dot(edge_normal);
// Set to 0 any that are pushing momentum uphill. For neutral this is unphysical.
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it'll ruin backward solve.
ownrates_visc += visc_contrib;
if (TESTNEUTVISC2) {
printf("%d i %d contrib.y %1.10E gradvy %1.10E %1.10E edge_nml %1.9E %1.9E ita %1.8E /m_n %1.8E cumu %1.9E\n",
iMinor, i, visc_contrib.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y, ita_par, over_m_n, ownrates_visc.y);
};
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
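// NB: m_ion is used here whereas the vertex-centred loop above uses m_n; these agree only if the
// neutral and ion masses are taken equal. (The Geometric copy of this routine below does the same.)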
};
}; // bUsableSide
};
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NnTn += visc_htg;
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
__global__ void kernelExpandSelectFlagIta(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
long * __restrict__ p_izNeighMinor,
int * __restrict__ p_iSelectFlag,
int * __restrict__ p_iSelectflagNeut,
int const number
) {
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
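// This kernel expands the selection masks by one ring: any vertex or minor cell that is not yet
// selected but has a neighbour currently flagged == number gets flagged number + 1. The
// ion/electron mask (p_iSelectFlag) and the neutral mask (p_iSelectflagNeut) are handled independently.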
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
long izTri[MAXNEIGH_d];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
if (p_iSelectFlag[iVertex + BEGINNING_OF_CENTRAL] == 0) {
bool found = false;
for (short i = 0; ((i < tri_len) && (found == false)); i++)
{
if (p_iSelectFlag[izTri[i]] == number) {
found = true;
};
};
if (found) p_iSelectFlag[iVertex + BEGINNING_OF_CENTRAL] = number + 1;
};
if (p_iSelectflagNeut[iVertex + BEGINNING_OF_CENTRAL] == 0) {
bool found = false;
for (short i = 0; ((i < tri_len) && (found == false)); i++)
{
if (p_iSelectflagNeut[izTri[i]] == number) {
found = true;
};
};
if (found) p_iSelectflagNeut[iVertex + BEGINNING_OF_CENTRAL] = number + 1;
};
};
info = p_info_minor[iMinor];
long izNeighMinor[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
if (p_iSelectFlag[iMinor] == 0) {
bool found = false;
for (short i = 0; ((i < 6) && (found == false)); i++)
{
if (p_iSelectFlag[izNeighMinor[i]] == number) {
found = true;
};
};
if (found) p_iSelectFlag[iMinor] = number + 1;
};
if (p_iSelectflagNeut[iMinor] == 0) {
bool found = false;
for (short i = 0; ((i < 6) && (found == false)); i++)
{
if (p_iSelectflagNeut[izNeighMinor[i]] == number) {
found = true;
};
};
if (found) p_iSelectflagNeut[iMinor] = number + 1;
};
}
// Neutral routine:
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT_Geometric(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri,
int * __restrict__ p_Select)
{
// ************************************************************************
// *********** WATCH OUT ************************************************
// ************************************************************************
// A copy of this routine with fixed flows only is in heatflux.cu
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: in each loop we want to let omega and nu drop out of scope as soon as we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there because that's fairer than one-way traffic and I don't wanna handle OUTERMOST?
// I mean, I could handle it, and do flows only if the flag does not come up OUTER_FRILL.
// OK just do that.
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.9 :
// !
if ((info.flag == DOMAIN_VERTEX)
// && (info.pos.modulus() < 4.9) -- if we have this then need in d/dbeta also.
&& (shared_ita_par_verts[threadIdx.x] > 0.0)
&& (p_Select[iVertex + BEGINNING_OF_CENTRAL] != 0)
)
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
#pragma unroll
for (short i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
// Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
if (p_Select[izTri[i]] != 0) {
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
}
else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
f64_vec2 edge_normal;
if (ita_par > 0.0) // note it was the minimum taken.
{
f64_vec3 opp_v, prev_v, next_v;
f64_vec2 opppos, prevpos, nextpos;
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
}
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x);
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
edge_normal = ReconstructEdgeNormal(prevpos, info.pos, nextpos, opppos);
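// If either end of the chord lies under the insulator, rebuild the edge normal from endpoints
// moved back above it -- same idea as the bLongi handling in the tri-centred loop further down.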
#ifdef INS_INS_3POINT
if (TestDomainPos(prevpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
}
else {
if (TestDomainPos(nextpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, opp_v.z
);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
};
};
#else
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
{
// One of the sides is dipped under the insulator -- set transverse deriv to 0.
// Bear in mind we are looking from a vertex into a tri, it can be ins tri.
gradvx = (opp_v.x - shared_v_n_verts[threadIdx.x].x)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvy = (opp_v.y - shared_v_n_verts[threadIdx.x].y)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvz = (opp_v.z - shared_v_n_verts[threadIdx.x].z)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n_verts[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n_verts[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n_verts[threadIdx.x].z, next_v.z, opp_v.z
);
// Could switch to the 3-in-one function that handles all 3 components in one call.
};
// Simplify:
#endif
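// Summary of the two builds: with INS_INS_3POINT the gradient near the insulator comes from a 3-point
// fit that drops whichever of prev/next lies under the insulator; without it the transverse part is
// zeroed and only the longitudinal difference (opp_v - ours) along the line of centres survives.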
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"gradvx %1.9E %1.9E gradvy %1.9E %1.9E gradvz %1.9E %1.9E \n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E edge_nor %1.9E %1.9E\n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
gradvx.x, gradvx.y, gradvy.x, gradvy.y, gradvz.x, gradvz.y,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
edge_normal.x, edge_normal.y);
}
// Order of calculations may help things to go out/into scope at the right times so careful with that.
// we also want to get nu from somewhere. So precompute nu at the time we precompute ita_e = n Te / nu_e, ita_i = n Ti / nu_i.
if (ita_par > 0.0)
{
// For neutral fluid viscosity does not involve dimensional transfers.
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
// if (iVertex == VERTCHOSEN) {
// printf("visc_contrib %1.9E %1.9E %1.9E ita %1.10E \n",
// visc_contrib.x, visc_contrib.y, visc_contrib.z, ita_par);
// }
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
}; // p_Select
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
}
else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
visc_htg = 0.0;
#else
f64 visc_htg0, visc_htg1, visc_htg2;
visc_htg0 = 0.0;
visc_htg1 = 0.0;
visc_htg2 = 0.0;
#endif
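// Without COLLECT_VISC_HTG_IN_TRIANGLES the triangle's viscous heating goes into three per-corner
// slots, p_NT_addition_tri[iMinor*3 + corner]; each edge's contribution below is shared half-and-half
// between the two corners adjacent to that edge (i == 1 -> corners 0,1; i == 3 -> 1,2; i == 5 -> 0,2).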
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS))
// && (info.pos.modulus() < 4.9) // if we have this then we have to have it in d/dbeta routine also.
&& (shared_ita_par[threadIdx.x] > 0.0)
&& (p_Select[iMinor] != 0)
) {
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (short i = 0; i < 6; i++)
{
if (p_Select[izNeighMinor[i]] != 0) {
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
}
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
bool bLongi = false;
#ifdef INS_INS_NONE
// Get rid of ins-ins triangle traffic:
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS)
bUsableSide = false;
}
// if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
// bLongi = true;
// have to put it below
#else
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS)
bLongi = true;
}
#endif
f64_vec2 gradvx, gradvy, gradvz;
f64_vec2 edge_normal;
f64_vec3 htg_diff;
if (bUsableSide)
{
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
f64_vec3 prev_v, opp_v, next_v;
f64_vec2 prevpos, nextpos, opppos;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
RotateClockwise(prev_v);
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
RotateAnticlockwise(prev_v);
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
RotateClockwise(next_v);
};
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
RotateAnticlockwise(next_v);
};
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
bLongi = true;
#ifdef INS_INS_3POINT
if (TestDomainPos(prevpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
}
else {
if (TestDomainPos(nextpos) == false) {
gradvx = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, opp_v.x
);
gradvy = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y
);
gradvz = GetGradient_3Point(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, opp_v.z
);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
};
};
#else
if ((TestDomainPos(prevpos) == false) || (TestDomainPos(nextpos) == false))
{
// One of the sides is dipped under the insulator -- set transverse deriv to 0.
// Bear in mind we are looking from a vertex into a tri, it can be ins tri.
gradvx = (opp_v.x - shared_v_n[threadIdx.x].x)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvy = (opp_v.y - shared_v_n[threadIdx.x].y)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
gradvz = (opp_v.z - shared_v_n[threadIdx.x].z)*(opppos - info.pos) /
(opppos - info.pos).dot(opppos - info.pos);
}
else {
gradvx = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.x, shared_v_n[threadIdx.x].x, next_v.x, opp_v.x
);
gradvy = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.y, shared_v_n[threadIdx.x].y, next_v.y, opp_v.y
);
gradvz = GetGradient(
//f64_vec2 prevpos, f64_vec2 ourpos, f64_vec2 nextpos, f64_vec2 opppos,
prevpos, info.pos, nextpos, opppos,
//f64 prev_v, f64 our_v, f64 next_v, f64 opp_v
prev_v.z, shared_v_n[threadIdx.x].z, next_v.z, opp_v.z
);
}
#endif
#ifdef INS_INS_NONE
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if (flag == CROSSING_INS) {
// just set it to 0.
bUsableSide = false;
gradvz.x = 0.0;
gradvz.y = 0.0;
gradvx.x = 0.0;
gradvx.y = 0.0;
gradvy.x = 0.0;
gradvy.y = 0.0;
};
};
#endif
htg_diff = shared_v_n[threadIdx.x] - opp_v;
if (TESTNEUTVISC2) {
printf("%d i %d prev_v %1.10E our_v %1.10E opp_v %1.10E next_v %1.10E\n",
iMinor, i, prev_v.y, shared_v_n[threadIdx.x].y, opp_v.y, next_v.y);
};
edge_normal.x = THIRD * (nextpos.y - prevpos.y);
edge_normal.y = THIRD * (prevpos.x - nextpos.x); // need to define so as to create unit vectors
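// edge_normal is the outward normal of this minor-cell edge, scaled by the edge length:
// the edge runs between the centroids THIRD*(prevpos + ourpos + opppos) and
// THIRD*(nextpos + ourpos + opppos), so its chord is THIRD*(nextpos - prevpos),
// and rotating that chord by 90 degrees gives the two components above.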
// if (iMinor == CHOSEN) printf("============================\nNeutral viscosity %d %d\n"
// "v.x %1.9E opp_v.x %1.9E prev_v.x %1.9E next_v.x %1.9E\n"
// "ourpos %1.9E %1.9E \n"
// "prevpos %1.9E %1.9E \n"
// "opppos %1.9E %1.9E \n"
// "nextpos %1.9E %1.9E \n"
// "gradvx %1.9E %1.9E gradvy %1.9E %1.9E edge_nor %1.9E %1.9E\n",
// iMinor, izNeighMinor[i],
// shared_v_n[threadIdx.x].x, opp_v.x, prev_v.x, next_v.x,
// info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
// gradvx.x, gradvx.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y);
//
if (bLongi) {
// move any edge_normal endpoints that are below the insulator,
// until they are above the insulator.
edge_normal = ReconstructEdgeNormal(
prevpos, info.pos, nextpos, opppos
);
};
};
if (bUsableSide) {
f64_vec3 visc_contrib;
visc_contrib.x = over_m_n*ita_par*gradvx.dot(edge_normal);
visc_contrib.y = over_m_n*ita_par*gradvy.dot(edge_normal);
visc_contrib.z = over_m_n*ita_par*gradvz.dot(edge_normal);
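// visc_contrib is the viscous flux through this edge: ita * (grad v) . edge_normal
// (edge_normal already carries the edge length), divided by m_n so that it adds
// directly into the MAR (momentum addition rate) accumulator below.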
// We considered setting to 0 any component that pushes momentum uphill, which is unphysical for a neutral:
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it would ruin the backward solve.
ownrates_visc += visc_contrib;
if (TESTNEUTVISC2) {
printf("%d i %d contrib.y %1.10E gradvy %1.10E %1.10E edge_nml %1.9E %1.9E ita %1.8E /m_n %1.8E cumu %1.9E\n",
iMinor, i, visc_contrib.y, gradvy.x, gradvy.y, edge_normal.x, edge_normal.y, ita_par, over_m_n, ownrates_visc.y);
};
if (i % 2 == 0) {
// vertex : heat collected by vertex
}
else {
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
#else
f64 heat_addn = -THIRD*m_ion*(htg_diff.dot(visc_contrib));
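// i is odd here (the even-i case above is the vertex-facing side), so i == 1, 3 or 5;
// the frictional heating for this edge is shared half-and-half between the two
// adjacent corner accumulators (i == 5 falls to the final else).
// Note m_ion appears in the heating here whereas the SYMM kernel's vertex loop uses
// m_n for the analogous term; for a neutral species the two masses are presumably
// all but identical.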
if (i == 1) {
visc_htg0 += 0.5*heat_addn;
visc_htg1 += 0.5*heat_addn;
}
else {
if (i == 3) {
visc_htg1 += 0.5*heat_addn;
visc_htg2 += 0.5*heat_addn;
}
else {
visc_htg0 += 0.5*heat_addn;
visc_htg2 += 0.5*heat_addn;
};
};
#endif
};
}; // bUsableSide
}; // p_Select
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
#ifdef COLLECT_VISC_HTG_IN_TRIANGLES
p_NT_addition_tri[iMinor].NnTn += visc_htg;
#else
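// Per-corner collection: 3 NTrates slots per triangle, indexed iMinor * 3 + corner,
// so the heating can be rolled up into the three corner vertices afterwards.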
p_NT_addition_tri[iMinor * 3 + 0].NnTn += visc_htg0;
p_NT_addition_tri[iMinor * 3 + 1].NnTn += visc_htg1;
p_NT_addition_tri[iMinor * 3 + 2].NnTn += visc_htg2;
#endif
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
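// kernelCreate_neutral_viscous_contrib_to_MAR_and_NT_SYMM:
// symmetric variant of the neutral viscosity routine above. Instead of reconstructing
// a full 2D gradient per edge, it uses only the normal derivative
// (v_opp - v_ours)/|r_opp - r_ours| across each edge, and takes the edge length as the
// distance |cc1 - cc0| between the circumcenters of the two triangles sharing that edge.
// Since ita is taken as the minimum of the two cells' values, away from the boundary
// cut-offs each pair of neighbouring cells exchanges equal and opposite momentum.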
__global__ void kernelCreate_neutral_viscous_contrib_to_MAR_and_NT_SYMM(
structural * __restrict__ p_info_minor,
f64_vec3 * __restrict__ p_v_n_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
f64 * __restrict__ p_ita_neut_minor, //
f64 * __restrict__ p_nu_neut_minor, //
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ p_NT_addition_rate,
NTrates * __restrict__ p_NT_addition_tri)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor]; // sort of thing we want as input
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_ita_par[threadsPerTileMinor]; // reuse for i,e ; or make 2 vars to combine the routines.
__shared__ f64 shared_nu[threadsPerTileMinor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_ita_par_verts[threadsPerTileMajor];
__shared__ f64 shared_nu_verts[threadsPerTileMajor]; // used for creating ita_perp, ita_cross
// There is room for some more double in shared per thread.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
f64 nu, ita_par; // optimization: we always each loop want to get rid of omega, nu once we have calc'd these, if possible!!
f64_vec3 ownrates_visc;
f64 visc_htg;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_ita_par[threadIdx.x] = p_ita_neut_minor[iMinor];
shared_nu[threadIdx.x] = p_nu_neut_minor[iMinor];
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST))
{
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = p_ita_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_nu_verts[threadIdx.x] = p_nu_neut_minor[iVertex + BEGINNING_OF_CENTRAL];
// But now I am going to set ita == 0 in OUTERMOST and agree never to look there, because that's fairer than one-way traffic and I don't want to handle OUTERMOST.
// I could handle it, doing flows only if the flag does not come up OUTER_FRILL.
// OK, just do that.
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
shared_ita_par_verts[threadIdx.x] = 0.0;
shared_nu_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
// How shall we arrange to do v_n, which is isotropic? Handle this first...
// Is the v_n coefficient negligible? Check.
// We actually have to think how to handle the x-y dimension. PopOhms will handle it.
// We can re-use some shared data -- such as pos and B -- to do both ions and electrons
// But they use different ita_par and different vez, viz.
// Often we don't need to do magnetised ion viscosity when we do magnetised electron.
f64_vec2 cc0, cc1;
if (threadIdx.x < threadsPerTileMajor) {
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
long izTri[MAXNEIGH_d];
char szPBC[MAXNEIGH_d];
short tri_len = info.neigh_len; // ?!
// JUST TO GET IT TO RUN: LIMIT OURSELVES TO RADIUS 4.5 :
if ((info.flag == DOMAIN_VERTEX) && (info.pos.modulus() < 4.5)
&& (shared_ita_par_verts[threadIdx.x] > 0.0))
//|| (info.flag == OUTERMOST))
{
// We are losing energy if there is viscosity into OUTERMOST.
memcpy(izTri, p_izTri + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH_d, MAXNEIGH_d * sizeof(char));
short i = 0;
f64_vec3 opp_v;// , prev_v, next_v; // never used
f64_vec2 opppos, prevpos, nextpos;
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
}
else {
opp_v = p_v_n_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
RotateAnticlockwise(opp_v);
}
CalculateCircumcenter(&cc0, info.pos, opppos, prevpos);
#pragma unroll
for (i = 0; i < tri_len; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
short iprev = i - 1; if (iprev < 0) iprev = tri_len - 1;
short inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
if (shared_ita_par_verts[threadIdx.x] < shared_ita_par[izTri[i] - StartMinor])
{
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
}
else {
ita_par = shared_ita_par[izTri[i] - StartMinor];
nu = shared_nu[izTri[i] - StartMinor];
};
} else {
f64 ita_theirs = p_ita_neut_minor[izTri[i]];
f64 nu_theirs = p_nu_neut_minor[izTri[i]];
if (shared_ita_par_verts[threadIdx.x] < ita_theirs) {
ita_par = shared_ita_par_verts[threadIdx.x];
nu = shared_nu_verts[threadIdx.x];
} else {
ita_par = ita_theirs;
nu = nu_theirs;
};
// I understand why we are still doing minimum ita at the wall but we would ideally like to stop.
};
// Guaranteed DOMAIN_VERTEX never needs to skip an edge; we include CROSSING_INS in viscosity.
// f64_vec2 gradvx, gradvy, gradvz;
f64_vec3 htg_diff;
// f64_vec2 edge_normal;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
CalculateCircumcenter(&cc1, opppos, info.pos, nextpos);
if (ita_par > 0.0) // note it was the minimum taken.
{
// ideally we might want to leave position out of the loop so that we can avoid reloading it.
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E izTri[i] %d \n", opp_v.x, izTri[i]);
} else {
opp_v = p_v_n_minor[izTri[i]];
// if (iVertex == VERTCHOSEN) printf("opp_v %1.9E v_n_minor izTri[i] %d \n", opp_v.x, izTri[i]);
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
RotateAnticlockwise(opp_v);
}
// BEWARE OF WHEN EDGE ISN'T CLOCKWISE ORDERED -- WE TAKE NEGATIVE AREA as well?
f64_vec3 deriv = (opp_v - shared_v_n_verts[threadIdx.x]) / (opppos - info.pos).modulus();
f64_vec3 visc_contrib = over_m_n*ita_par*deriv*(cc1 - cc0).modulus();
// there is an unnecessary sqrt but remember, sqrt is still cheaper than a divide.
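// This is the symmetric scheme: flux = ita * (normal derivative) * (edge length),
// with the normal derivative approximated by (v_opp - v_ours) over the distance between
// the two cell centres, and the edge length taken as the circumcenter separation |cc1 - cc0|.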
htg_diff.x = shared_v_n_verts[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n_verts[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n_verts[threadIdx.x].z - opp_v.z;
if (TESTNEUTVISC)
printf("============================\nNeutral viscosity %d tri %d ita_par %1.10E\n"
"v %1.9E %1.9E %1.9E opp_v %1.9E %1.9E %1.9E\n"
"ourpos %1.8E %1.8E prevpos %1.8E %1.8E opppos %1.8E %1.8E nextpos %1.8E %1.8E \n"
,
iVertex, izTri[i], ita_par,
shared_v_n_verts[threadIdx.x].x, shared_v_n_verts[threadIdx.x].y,
shared_v_n_verts[threadIdx.x].z, opp_v.x, opp_v.y, opp_v.z,
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y);
// For a neutral fluid, viscosity does not involve dimensional transfers.
//f64_vec3 visc_contrib;
//visc_contrib.x = over_m_n*(ita_par*gradvx.dot(edge_normal)); // if we are looking at higher vz looking out, go up.
//visc_contrib.y = over_m_n*(ita_par*gradvy.dot(edge_normal));
//visc_contrib.z = over_m_n*(ita_par*gradvz.dot(edge_normal));
ownrates_visc += visc_contrib;
visc_htg += -THIRD*m_n*(htg_diff.dot(visc_contrib));
if (TESTNEUTVISC)
printf("htg_diff %1.9E %1.9E %1.9E visc_contrib %1.9E %1.9E %1.9E visc_htg %1.10E\n"
,
htg_diff.x, htg_diff.y, htg_diff.z, visc_contrib.x, visc_contrib.y, visc_contrib.z,
visc_htg
);
}
// MAR_elec -= Make3(0.5*(n0 * T0.Te + n1 * T1.Te)*over_m_e*edge_normal, 0.0);
// v0.vez = vie_k.vez + h_use * MAR.z / (n_use.n*AreaMinor);
cc0 = cc1;
prevpos = opppos;
opppos = nextpos;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &ownrates, sizeof(f64_vec3));
p_NT_addition_rate[iVertex].NnTn += visc_htg;
if (TESTNEUTVISC) {
printf("%d : cumulative d/dt NnTn %1.10E \n", iVertex, p_NT_addition_rate[iVertex].NnTn);
};
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iVertex %d NaN ownrates.x\n", iVertex);
if (ownrates.y != ownrates.y)
printf("iVertex %d NaN ownrates.y\n", iVertex);
if (ownrates.z != ownrates.z)
printf("iVertex %d NaN ownrates.z\n", iVertex);
if (visc_htg != visc_htg) printf("iVertex %d NAN VISC HTG\n", iVertex);
#endif
} else {
// NOT domain vertex: Do nothing
};
};
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
info = p_info_minor[iMinor];
// memcpy(&(ownrates), &(p_MAR_ion[iMinor]), sizeof(f64_vec3));
memset(&ownrates_visc, 0, sizeof(f64_vec3));
visc_htg = 0.0;
f64_vec3 opp_v;
f64_vec2 prevpos, nextpos, opppos;
{
long izNeighMinor[6];
char szPBC[6];
if (TESTNEUTVISC2) printf("%d info.flag %d ita_ours %1.8E \n", iMinor, info.flag, shared_ita_par[threadIdx.x]);
// JUST TO GET IT TO RUN:
if (((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) &&
(info.pos.modulus() < 4.9) && (shared_ita_par[threadIdx.x] > 0.0)){
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
short i = 0;
short inext = i + 1; if (inext > 5) inext = 0;
short iprev = i - 1; if (iprev < 0) iprev = 5;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
CalculateCircumcenter(&cc0, info.pos, opppos, prevpos);
// Let's make life easier and load up an array of 6 n's beforehand.
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
iprev = i - 1; if (iprev < 0) iprev = 5;
bool bUsableSide = true;
{
// newly uncommented:
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par[izNeighMinor[i] - StartMinor])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par[izNeighMinor[i] - StartMinor];
nu = shared_nu[izNeighMinor[i] - StartMinor];
};
if (shared_ita_par[izNeighMinor[i] - StartMinor] == 0.0) bUsableSide = false;
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
if (shared_ita_par[threadIdx.x] < shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL])
{
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x];
}
else {
ita_par = shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
nu = shared_nu_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL];
};
if (shared_ita_par_verts[izNeighMinor[i] - StartMajor - BEGINNING_OF_CENTRAL] == 0.0) bUsableSide = false;
}
else {
f64 ita_par_opp = p_ita_neut_minor[izNeighMinor[i]];
f64 nu_theirs = p_nu_neut_minor[izNeighMinor[i]];
if (shared_ita_par[threadIdx.x] < ita_par_opp) {
ita_par = shared_ita_par[threadIdx.x];
nu = shared_nu[threadIdx.x]; // why do I deliberately use the corresponding nu? nvm
}
else {
ita_par = ita_par_opp;
nu = nu_theirs;
}
if (ita_par_opp == 0.0) bUsableSide = false;
};
}
};
// basically bUsableSide here just depends on whether min(ita, ita_opp) == 0.
f64_vec3 htg_diff;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
CalculateCircumcenter(&cc1, info.pos, nextpos, opppos);
if (bUsableSide) {
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
}
else {
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
RotateClockwise(opp_v);
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
RotateAnticlockwise(opp_v);
}
htg_diff.x = shared_v_n[threadIdx.x].x - opp_v.x;
htg_diff.y = shared_v_n[threadIdx.x].y - opp_v.y;
htg_diff.z = shared_v_n[threadIdx.x].z - opp_v.z;
f64_vec3 deriv = (opp_v - shared_v_n[threadIdx.x]) / (opppos - info.pos).modulus();
f64_vec3 visc_contrib = over_m_n*ita_par*(cc1 - cc0).modulus()*deriv;
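// Same symmetric flux as in the vertex loop above: normal derivative times the
// circumcenter-to-circumcenter edge length, times the shared (minimum) ita.
// The heating below uses m_ion where the vertex loop used m_n; for a neutral
// species the two masses are presumably all but identical.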
// We considered setting to 0 any component that pushes momentum uphill, which is unphysical for a neutral:
// if (visc_contrib.x*htg_diff.x > 0.0) visc_contrib.x = 0.0;
// Can't do it because it would ruin the backward solve.
ownrates_visc += visc_contrib;
if (0)//iMinor == CHOSEN)
printf("============================\nNeutral viscosity %d %d \n"
"v.z %1.9E opp_v.z %1.9E ita_par %1.9E edgelen %1.9E dist_out %1.9E \n"
"ourpos %1.9E %1.9E prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpos %1.9E %1.9E \n"
"deriv.z %1.9E visc_contrib.z %1.9E ownrates.z %1.9E cc0 %1.8E %1.8E cc1 %1.8E %1.8E\n",
iMinor, izNeighMinor[i],
shared_v_n[threadIdx.x].z, opp_v.z, ita_par,
(cc1 - cc0).modulus(), (opppos - info.pos).modulus(),
info.pos.x, info.pos.y, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y,
deriv.z, visc_contrib.z, ownrates_visc.z, cc0.x,cc0.y, cc1.x, cc1.y
);
if (i % 2 == 0) {
// vertex : heat collected by vertex
} else {
visc_htg += -THIRD*m_ion*(htg_diff.dot(visc_contrib));
};
}; // bUsableSide
cc0 = cc1;
prevpos = opppos;
opppos = nextpos;
}; // next i
f64_vec3 ownrates;
memcpy(&ownrates, &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
ownrates += ownrates_visc;
memcpy(&(p_MAR_neut[iMinor]), &(ownrates), sizeof(f64_vec3));
p_NT_addition_tri[iMinor].NnTn += visc_htg;
// We will have to round this up into the vertex heat afterwards.
#ifdef DEBUGNANS
if (ownrates.x != ownrates.x)
printf("iMinor %d NaN ownrates.x\n", iMinor);
if (ownrates.y != ownrates.y)
printf("iMinor %d NaN ownrates.y\n", iMinor);
if (ownrates.z != ownrates.z)
printf("iMinor %d NaN ownrates.z\n", iMinor);
if (visc_htg != visc_htg) printf("iMinor %d NAN VISC HTG\n", iMinor);
#endif
// We do best by taking each boundary, considering how
// much heat to add for each one.
}
else {
// Not domain tri or crossing_ins
// Did we fairly model the insulator as a reflection of v?
}
} // scope
}
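// The kernel below is kept commented out for reference; per the note at its top it
// predates the insulator-triangle handling and should not be setting v_r = 0.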
/*
__global__ void kernelNeutral_pressure_and_momflux(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
T3 * __restrict__ p_T_minor,
f64_vec3 * __restrict__ p_v_n_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_MAR_neut
)
{
this routine is missing the changes to handle insulator tris, and should not be setting v_r = 0
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_Tn[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Tn_verts[threadsPerTileMajor]; // 1/2( 13+3+2+2+1 = 21) = 10.5 => total 18.5 per minor thread.
// shame we couldn't get down to 16 per minor thread; if we could, that might be better even if we had to load something on the fly.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
shared_Tn[threadIdx.x] = p_T_minor[iMinor].Tn; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if (info.flag == DOMAIN_VERTEX) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_Tn_verts[threadIdx.x] = p_T_minor[iVertex + BEGINNING_OF_CENTRAL].Tn;
}
else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2));
shared_Tn_verts[threadIdx.x] = 0.0;
};
};
__syncthreads();
f64_vec3 our_v, opp_v, prev_v, next_v;
f64 oppT, prevT, nextT, ourT;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_v_n_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
ourT = shared_Tn_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_Tn[izTri[iprev] - StartMinor];
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT = prev_T.Tn;
prev_v = p_v_n_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_Tn[izTri[i] - StartMinor];
opp_v = shared_v_n[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
} else {
T3 opp_T = p_T_minor[izTri[i]];
oppT = opp_T.Tn;
opp_v = p_v_n_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
f64_vec2 projendpt0;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) {
iend = tri_len - 2;
if (info.flag == OUTERMOST) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_OUTER_RADIUS_d); // back of cell for Lap purposes
} else {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
}
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_Tn[izTri[inext] - StartMinor];
next_v = shared_v_n[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT = next_T.Tn;
next_v = p_v_n_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64 T0, T1;
T0 = THIRD*(prevT + ourT + oppT);
T1 = THIRD*(nextT + ourT + oppT);
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + opp_v + next_v);
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// CHANGES 20th August 2019
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
if (relvnormal < 0.0)
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v - our_v);
// Note: minus a minus so correct sign
// And we did what? We took n at centre of a triangle WITHIN this major cell
// But did not take upwind n ---- is that consistent for all advection?
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevT = oppT;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
oppT = nextT;
opp_v_overall = next_v_overall;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
} else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
our_v = shared_v_n[threadIdx.x];
ourT = shared_Tn[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
} else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevT = shared_Tn[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_Tn_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
prevT = p_T_minor[izNeighMinor[iprev]].Tn;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
oppT = shared_Tn[izNeighMinor[i] - StartMinor];
} else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_Tn_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT = opp_T.Tn;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
nextT = shared_Tn[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_Tn_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
nextT = p_T_minor[izNeighMinor[inext]].Tn;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + next_v + opp_v);
//if (((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
{ // Decided not to add test
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// CHANGES 20th August 2019:
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
if (relvnormal < 0.0)
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v - our_v);
f64 T0 = THIRD*(ourT + prevT + oppT);
f64 T1 = THIRD*(ourT + nextT + oppT);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
// do nothing
}
else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
// Just for sanity for now, let's just set our own n,T for the edge:
n0 = p_n_minor[iMinor].n_n;
n1 = p_n_minor[iMinor].n_n;
T0 = ourT;
T1 = ourT;
}
}
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
}
endpt0 = endpt1;
prevT = oppT;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
oppT = nextT;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
if (info.flag == CROSSING_INS) {
// In this case set v_r = 0 and set a_TP_r = 0 and dv/dt _r = 0 in general
//f64_vec2 rhat = info.pos / info.pos.modulus();
MAR_neut -= Make3(
(MAR_neut.dotxy(info.pos) /
(info.pos.x*info.pos.x + info.pos.y*info.pos.y))*info.pos, 0.0);
no
// Hmm
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}*/
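// kernelNeutral_pressure: accumulates the neutral pressure force only (no momentum
// advection), as -(1/m_n) * sum over edges of 0.5*(n0*T0 + n1*T1) * edge_normal,
// where n is interpolated from the shard models at the cell corners and T from the
// minor T values. Vertex minors participate only where bz_pressureflag is set;
// the second half of the routine handles the triangle minors.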
__global__ void kernelNeutral_pressure(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
T3 * __restrict__ p_T_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
bool * __restrict__ bz_pressureflag,
f64_vec3 * __restrict__ p_MAR_neut
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64 shared_Tn[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ f64 shared_Tn_verts[threadsPerTileMajor]; // 1/2( 13+3+2+2+1 = 21) = 10.5 => total 18.5 per minor thread.
// shame we couldn't get down to 16 per minor thread; if we could, that might be better even if we had to load something on the fly.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_Tn[threadIdx.x] = p_T_minor[iMinor].Tn; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_Tn_verts[threadIdx.x] = p_T_minor[iVertex + BEGINNING_OF_CENTRAL].Tn;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
} else {
// it still manages to coalesce, let's hope, because ShardModel is 13 doubles not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
};
};
__syncthreads();
f64 oppT, prevT, nextT, ourT;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourT = shared_Tn_verts[threadIdx.x];
bool bPressure = bz_pressureflag[iVertex];
if (bPressure) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevT = shared_Tn[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
T3 prev_T = p_T_minor[izTri[iprev]];
prevT = prev_T.Tn;
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppT = shared_Tn[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
T3 opp_T = p_T_minor[izTri[i]];
oppT = opp_T.Tn;
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
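// endpt0 is the first corner of the minor-cell boundary: the centroid of our vertex
// position and the centres of the previous and current triangles.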
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextT = shared_Tn[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
T3 next_T = p_T_minor[izTri[inext]];
nextT = next_T.Tn;
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
f64_vec2 endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64 T0, T1;
T0 = THIRD*(prevT + ourT + oppT);
T1 = THIRD*(nextT + ourT + oppT);
// And we did what? We took n at the centre of a triangle WITHIN this major cell,
// but did not take the upwind n ---- is that consistent for all advection?
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
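// Pressure contribution through this edge: -(1/m_n) * pbar * edge_normal, with
// pbar = 0.5*(n0*T0 + n1*T1), i.e. the average of nT at the two edge endpoints.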
// AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
if (TESTVNXVERT) {
printf("iVertex %d %d iTri %d : contrib.x %1.8E n01 %1.8E %1.8E T01 %1.8E %1.8E oppT %1.8E cumu %1.8E\n",
iVertex, i, izTri[i], 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.x,
n0, n1, T0, T1, oppT, MAR_neut.x);
}
if (TESTVNYVERT) {
printf("iVertex %d %d iTri %d : contrib.y %1.8E n01 %1.8E %1.8E T01 %1.8E %1.8E oppT %1.8E cumu %1.8E\n",
iVertex, i, izTri[i], 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, oppT, MAR_neut.y);
}
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prevT = oppT;
opppos = nextpos;
oppT = nextT;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// now the minor with n_ion part:
info = p_info_minor[iMinor];
ourT = shared_Tn[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
} else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevT = shared_Tn[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
} else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevT = shared_Tn_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
prevT = p_T_minor[izNeighMinor[iprev]].Tn;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
oppT = shared_Tn[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppT = shared_Tn_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
T3 opp_T = p_T_minor[izNeighMinor[i]];
oppT = opp_T.Tn;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
} else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
} else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
} else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
nextT = shared_Tn[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextT = shared_Tn_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
nextT = p_T_minor[izNeighMinor[inext]].Tn;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
if (TESTVNY2) printf("i %d prevpos %1.9E %1.9E opppos %1.9E %1.9E nextpos %1.9E %1.9E\n",
i, prevpos.x, prevpos.y, opppos.x, opppos.y, nextpos.x, nextpos.y);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
//if (((izNeighMinor[i] >= NumInnerFrills_d) && (izNeighMinor[i] < FirstOuterFrill_d)))
{ // Decided not to add test
f64 T0 = THIRD*(ourT + prevT + oppT);
f64 T1 = THIRD*(ourT + nextT + oppT);
// CROSSING_INS pressure isn't working ... somehow we are not getting edge_normal.y to sum to zero
// when we have n and T the same all the way round.
// However, there's not much sense in having it anyway, since nT can't possibly differ from top to bottom.
// We could inherit the x-direction pressure if we wanted, though...
// Ideally it would be good to ask why this is happening.
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
if (TESTVNY2) printf("CROSSING INS! THERMAL PRESSURE %d MAR_neut %1.10E contrib %1.10E n01 %1.8E %1.8E\n"
"T %1.10E %1.10E edge_normal %1.9E %1.9E \n",
iMinor, MAR_neut.y, 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, edge_normal.x, edge_normal.y);
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
// Just for sanity for now, let's just set our own n,T for the edge:
n0 = p_n_minor[iMinor].n_n;
n1 = p_n_minor[iMinor].n_n;
T0 = ourT;
T1 = ourT;
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// set nT on the edge: try just the average of the two nT, weighted by distance to own centre.
// Recall periodic when we look at distance to own centre.
f64 nT_edge = 0.5*(p_n_minor[iMinor].n_n*ourT + p_n_minor[izNeighMinor[i]].n_n*oppT);
MAR_neut -= Make3(nT_edge*over_m_n*edge_normal, 0.0);
if ((TESTVNY2)) printf("crossing-crossing: contrib %1.8E nT_edge %1.8E edge_normal %1.8E ourT %1.8E oppT %1.8E endpt0 %1.8E %1.8E edpt1 %1.8E %1.8E\n", -nT_edge*over_m_n*edge_normal.y,
nT_edge, edge_normal.y, ourT, oppT,endpt0.x, endpt0.y, endpt1.x, endpt1.y);
}
else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
if (TESTVNY2) printf("prevprevpos %1.9E %1.9E \n", prevprevpos.x, prevprevpos.y);
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
}
else {
if (TESTVNY2) printf("%%%%%%%%%%%%%%%%%%%%%%%%%% \nprevflag %d iprev %d izNeighMinor[iprev] %d\n%%%%%%%%%%%%%%%%%%%%%%%% /n",
prevflag, iprev, izNeighMinor[iprev]);
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
if (TESTVNY2) printf("nextnextpos %1.9E %1.9E \n", nextnextpos.x, nextnextpos.y);
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 nT_edge = p_n_minor[iMinor].n_n*ourT;
MAR_neut -= Make3(nT_edge*over_m_n*edge_normal, 0.0);
if ((TESTVNY2)) printf("Looking into ins: contrib %1.8E nT_edge %1.8E edge_normal %1.8E endpot01 %1.8E %1.8E , %1.8E %1.8E\n", -nT_edge*over_m_n*edge_normal.y,
nT_edge, edge_normal.y, endpt0.x, endpt0.y, endpt1.x, endpt1.y);
// will be a 0 contribution if endpt1 = endpt0, that's ok.
}; // CROSSING_INS neigh or not
}; // domain triangle neigh opposite or not
} else {
MAR_neut -= Make3(0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal, 0.0);
if (TESTVNY2) printf("domain! PRESSURE %d : %d %d MAR_neut %1.10E contrib %1.10E n01 %1.8E %1.8E\n"
"T %1.10E %1.10E edge_normal %1.9E %1.9E \n",
iMinor, i, izNeighMinor[i], MAR_neut.y, 0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
n0, n1, T0, T1, edge_normal.x, edge_normal.y);
};
if ((TESTVNY2))
printf("iMinor %d : flag %d : %d %d [flag %d] n01 %1.9E %1.9E T01 %1.9E %1.9E oppT %1.9E contrib %1.10E MAR %1.9E\n",
iMinor, info.flag, i, izNeighMinor[i],
p_info_minor[izNeighMinor[i]].flag,
n0, n1, T0, T1, oppT, -0.5*(n0*T0 + n1*T1)*over_m_n*edge_normal.y,
MAR_neut.y);
}
endpt0 = endpt1;
prevT = oppT;
prevpos = opppos;
oppT = nextT;
opppos = nextpos;
iprev = i;
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
} else {
// Not domain, not crossing_ins, not a frill
// ==========================================
// Crossing cath goes here: no pressure.
// AreaMinor unknown ???
} // non-domain tri
}; // was it FRILL
}
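// kernelNeutral_momflux: advective momentum flux for the neutral species.
// For each vertex-centred minor (first part of the block) and each triangle minor,
// we walk the polygon of edges against the neighbouring minors, form the edge-average
// relative velocity normal to each edge (fluid velocity minus mesh motion v_overall),
// and upwind the momentum carried across: outflow removes our own v, inflow brings in
// the neighbour's v, weighted by the shard-model densities n0, n1 at the edge ends.
// CROSSING_INS triangles clamp their edges to the insulator radius; at the insulator
// the radial velocity component is reflected and the kinetic-energy change is booked
// into NT_addition_tri.NnTn.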
__global__ void kernelNeutral_momflux(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
LONG3 * __restrict__ p_who_am_I_to_corners,
LONG3 * __restrict__ p_tricornerindex,
f64_vec3 * __restrict__ p_v_n_minor,
ShardModel * __restrict__ p_n_shards,
nvals * __restrict__ p_n_minor, // Just to handle insulator
f64_vec2 * __restrict__ p_v_overall_minor,
f64_vec3 * __restrict__ p_MAR_neut,
NTrates * __restrict__ NT_addition_tri
)
{
__shared__ f64_vec3 shared_v_n[threadsPerTileMinor];
__shared__ f64_vec2 shared_v_overall[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ ShardModel shared_n_shards[threadsPerTileMajor];
__shared__ f64_vec3 shared_v_n_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_v_overall_verts[threadsPerTileMajor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
// A shame we couldn't get down to 16 per minor thread; if we could, that might be better even if we had to load something on the fly.
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos; // QUESTION: DOES THIS LOAD CONTIGUOUSLY?
shared_v_n[threadIdx.x] = p_v_n_minor[iMinor];
shared_v_overall[threadIdx.x] = p_v_overall_minor[iMinor];
// Advection should be an outer cycle at 1e-10 s.
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
if ((info.flag == DOMAIN_VERTEX) || (info.flag == OUTERMOST)) {
memcpy(&(shared_n_shards[threadIdx.x]), &(p_n_shards[iVertex]), sizeof(ShardModel)); // + 13
memcpy(&(shared_v_n_verts[threadIdx.x]), &(p_v_n_minor[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
shared_v_overall_verts[threadIdx.x] = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
} else {
// Let's hope this still coalesces, given that ShardModel is 13 doubles, not 12.
memset(&(shared_n_shards[threadIdx.x]), 0, sizeof(ShardModel)); // + 13
memset(&(shared_v_n_verts[threadIdx.x]), 0, sizeof(f64_vec3));
memset(&(shared_v_overall_verts[threadIdx.x]), 0, sizeof(f64_vec2));
};
};
__syncthreads();
f64_vec3 our_v, opp_v, prev_v, next_v;
f64_vec2 our_v_overall, prev_v_overall, next_v_overall, opp_v_overall;
f64_vec2 opppos, prevpos, nextpos;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor) {
AreaMinor = 0.0;
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iVertex + BEGINNING_OF_CENTRAL]), sizeof(f64_vec3));
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
our_v = shared_v_n_verts[threadIdx.x];
our_v_overall = shared_v_overall_verts[threadIdx.x];
if (info.flag == DOMAIN_VERTEX) {
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prev_v = shared_v_n[izTri[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prev_v = p_v_n_minor[izTri[iprev]];
prev_v_overall = p_v_overall_minor[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opp_v = shared_v_n[izTri[i] - StartMinor];
opp_v_overall = shared_v_overall[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
opp_v = p_v_n_minor[izTri[i]];
opp_v_overall = p_v_overall_minor[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64 n0 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[iprev] + shared_n_shards[threadIdx.x].n_cent);
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
// DOMAIN_VERTEX only here!
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
next_v = shared_v_n[izTri[inext] - StartMinor];
next_v_overall = shared_v_overall[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
next_v = p_v_n_minor[izTri[inext]];
next_v_overall = p_v_overall_minor[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
f64 n1;
n1 = THIRD*(shared_n_shards[threadIdx.x].n[i] + shared_n_shards[threadIdx.x].n[inext] + shared_n_shards[threadIdx.x].n_cent);
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + opp_v + next_v);
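// relvnormal: edge-average fluid velocity minus the edge's own motion (mesh velocity
// v_overall), dotted with the (unnormalised) edge normal. Its sign decides the upwind choice below.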
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
// CHANGES 20th August 2019
// OLD, unstable:
// MAR_neut -= 0.5*relvnormal* (n0 *(v0-our_v) + n1 * (v1 - our_v));
int neighflag = p_info_minor[izTri[i]].flag;
if (neighflag == DOMAIN_TRIANGLE) {
if (relvnormal > 0.0) {
// losing stuff
MAR_neut -= 0.5*relvnormal*(n0 + n1)*our_v;
}
else {
MAR_neut -= 0.5*relvnormal*(n0 + n1)*opp_v;
// Why minus? relvnormal was negative, but we gain a positive amount of opp_v.
};
};
if (TESTVNY3) {
printf("%d | %d %d | MAR_neuty %1.9E contrib %1.9E %1.9E n0+n1 %1.9E v0.y %1.9E \n"
"our_v_overall %1.9E next_v_overall %1.9E prev_v_overall %1.9E \n"
"our_v %1.9E opp_v %1.9E next_v %1.9E prev_v %1.9E\n",
iVertex, i, izTri[i], MAR_neut.y,
-0.5*relvnormal* (n0 + n1) *(our_v.y),
-0.5*relvnormal* (n0 + n1) *(opp_v.y),
n0+n1, v0.y,
our_v_overall.y, next_v_overall.y, prev_v_overall.y,
our_v.y, opp_v.y, next_v.y, prev_v.y);
}
if (TESTVNXVERT) {
printf("%d | %d %d | MAR_neutx %1.9E contrib %1.9E %1.9E n0+n1 %1.9E v0.y %1.9E \n"
"our_v_overall %1.9E next_v_overall %1.9E prev_v_overall %1.9E \n"
"our_v %1.9E opp_v %1.9E next_v %1.9E prev_v %1.9E\n",
iVertex, i, izTri[i], MAR_neut.x,
-0.5*relvnormal* (n0 + n1) *(our_v.x),
-0.5*relvnormal* (n0 + n1) *(opp_v.x),
n0 + n1, v0.x,
our_v_overall.x, next_v_overall.x, prev_v_overall.x,
our_v.x, opp_v.x, next_v.x, prev_v.x);
}
// ______________________________________________________
//// whether the v that is leaving is greater than our v ..
//// Formula:
//// dv/dt = (d(Nv)/dt - dN/dt v) / N
//// We include the divide by N when we enter the accel routine.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
n0 = n1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
}; // next i
memcpy(p_MAR_neut + iVertex + BEGINNING_OF_CENTRAL, &(MAR_neut), sizeof(f64_vec3));
}
else {
// NOT domain vertex: Do nothing
};
}; // was it domain vertex or Az-only
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// Now the per-triangle (minor) part:
info = p_info_minor[iMinor];
our_v = shared_v_n[threadIdx.x];
our_v_overall = shared_v_overall[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
f64_vec3 MAR_neut;
memcpy(&(MAR_neut), &(p_MAR_neut[iMinor]), sizeof(f64_vec3));
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
// Do nothing? Who cares what it is.
}
else {
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
memcpy(&prev_v, &(shared_v_n[izNeighMinor[iprev] - StartMinor]), sizeof(f64_vec3));
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
prev_v_overall = shared_v_overall[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&prev_v, &(shared_v_n_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
prev_v_overall = shared_v_overall_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
memcpy(&prev_v, &(p_v_n_minor[izNeighMinor[iprev]]), sizeof(f64_vec3));
prev_v_overall = p_v_overall_minor[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
prev_v = Clockwise_rotate3(prev_v);
prev_v_overall = Clockwise_d*prev_v_overall;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
prev_v = Anticlock_rotate3(prev_v);
prev_v_overall = Anticlockwise_d*prev_v_overall;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
memcpy(&opp_v, &(shared_v_n[izNeighMinor[i] - StartMinor]), sizeof(f64_vec3));
opppos = shared_pos[izNeighMinor[i] - StartMinor];
opp_v_overall = shared_v_overall[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&opp_v, &(shared_v_n_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
opp_v_overall = shared_v_overall_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
memcpy(&opp_v, &(p_v_n_minor[izNeighMinor[i]]), sizeof(f64_vec3));
opp_v_overall = p_v_overall_minor[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
opp_v = Clockwise_rotate3(opp_v);
opp_v_overall = Clockwise_d*opp_v_overall;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
opp_v = Anticlock_rotate3(opp_v);
opp_v_overall = Anticlockwise_d*opp_v_overall;
}
long who_am_I_to_corners[3];
memcpy(who_am_I_to_corners, &(p_who_am_I_to_corners[iMinor]), sizeof(long) * 3);
LONG3 cornerindex = p_tricornerindex[iMinor];
// each corner we want to pick up 3 values off n_shards, as well as n_cent.
// The three values will not always be contiguous!!!
// Let's make life easier and load up an array of 6 n's beforehand.
f64 n_array[6];
f64 n0, n1;
short who_am_I = who_am_I_to_corners[0];
short tri_len = p_info_minor[cornerindex.i1 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i1 >= StartMajor) && (cornerindex.i1 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[0] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
n_array[1] = THIRD*(shared_n_shards[cornerindex.i1 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i1 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i1 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i1].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i1].n, sizeof(f64_vec2));
n_array[0] = THIRD*(temp.x + temp.y + ncent);
n_array[1] = THIRD*(p_n_shards[cornerindex.i1].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64_vec2));
n_array[0] = THIRD*(p_n_shards[cornerindex.i1].n[0] + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i1].n[who_prev]), sizeof(f64) * 3);
n_array[0] = THIRD*(temp.z + temp.y + ncent);
n_array[1] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[1];
tri_len = p_info_minor[cornerindex.i2 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i2 >= StartMajor) && (cornerindex.i2 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[2] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
n_array[3] = THIRD*(shared_n_shards[cornerindex.i2 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i2 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i2 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i2].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i2].n, sizeof(f64_vec2));
n_array[2] = THIRD*(temp.x + temp.y + ncent);
n_array[3] = THIRD*(p_n_shards[cornerindex.i2].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64_vec2));
n_array[2] = THIRD*(p_n_shards[cornerindex.i2].n[0] + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i2].n[who_prev]), sizeof(f64) * 3);
n_array[2] = THIRD*(temp.z + temp.y + ncent);
n_array[3] = THIRD*(temp.x + temp.y + ncent);
};
};
}
who_am_I = who_am_I_to_corners[2];
tri_len = p_info_minor[cornerindex.i3 + BEGINNING_OF_CENTRAL].neigh_len;
if ((cornerindex.i3 >= StartMajor) && (cornerindex.i3 < EndMajor))
{
short who_prev = who_am_I - 1;
if (who_prev < 0) who_prev = tri_len - 1;
// Worry about pathological cases later.
short who_next = who_am_I + 1;
if (who_next == tri_len) who_next = 0;
n_array[4] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_next]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
n_array[5] = THIRD*(shared_n_shards[cornerindex.i3 - StartMajor].n[who_prev]
+ shared_n_shards[cornerindex.i3 - StartMajor].n[who_am_I]
+ shared_n_shards[cornerindex.i3 - StartMajor].n_cent);
}
else {
// comes from elsewhere
f64 ncent = p_n_shards[cornerindex.i3].n_cent;
short who_prev = who_am_I - 1;
if (who_prev < 0) {
who_prev = tri_len - 1;
f64_vec2 temp;
memcpy(&temp, p_n_shards[cornerindex.i3].n, sizeof(f64_vec2));
n_array[4] = THIRD*(temp.x + temp.y + ncent);
n_array[5] = THIRD*(p_n_shards[cornerindex.i3].n[who_prev] + temp.x + ncent);
}
else {
short who_next = who_am_I + 1;
if (who_next == tri_len) {
f64_vec2 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64_vec2));
n_array[4] = THIRD*(p_n_shards[cornerindex.i3].n[0] + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
}
else {
// typical case
f64_vec3 temp;
memcpy(&temp, &(p_n_shards[cornerindex.i3].n[who_prev]), sizeof(f64) * 3);
n_array[4] = THIRD*(temp.z + temp.y + ncent);
n_array[5] = THIRD*(temp.x + temp.y + ncent);
};
};
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
memcpy(&next_v, &(shared_v_n[izNeighMinor[inext] - StartMinor]), sizeof(f64_vec3));
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
next_v_overall = shared_v_overall[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
memcpy(&next_v, &(shared_v_n_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor]), sizeof(f64_vec3));
next_v_overall = shared_v_overall_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
} else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
memcpy(&next_v, &(p_v_n_minor[izNeighMinor[inext]]), sizeof(f64_vec3));
next_v_overall = p_v_overall_minor[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
next_v = Clockwise_rotate3(next_v);
next_v_overall = Clockwise_d*next_v_overall;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
next_v = Anticlock_rotate3(next_v);
next_v_overall = Anticlockwise_d*next_v_overall;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
n0 = n_array[i];
n1 = n_array[inext]; // 0,1 are either side of corner 0. What is seq of MinorNeigh ?
// Assume neighs 0,1 are relevant to border with tri 0 minor.
f64_vec3 v0 = THIRD*(our_v + prev_v + opp_v);
f64_vec3 v1 = THIRD*(our_v + next_v + opp_v);
f64 relvnormal = 0.5*((v0 + v1).xypart()
- (THIRD * (our_v_overall + next_v_overall + opp_v_overall))
- (THIRD * (our_v_overall + prev_v_overall + opp_v_overall))
).dot(edge_normal);
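// Upwind momentum flux: if the relative normal velocity is outward (relvnormal > 0)
// we lose our own momentum, 0.5*relvnormal*(n0+n1)*our_v; otherwise we gain the
// neighbour's, 0.5*relvnormal*(n0+n1)*opp_v. CROSSING_INS triangles first clamp the
// edge to the insulator radius and rebuild relvnormal before applying the same rule.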
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
if (izNeighMinor[i] < BEGINNING_OF_CENTRAL) {
// Note that average instead of upwind, is of course unstable.
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
}
else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
}
} else {
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 v_overall0, v_overall1;
v_overall0 = THIRD * (our_v_overall + prev_v_overall + opp_v_overall);
v_overall1 = THIRD * (our_v_overall + next_v_overall + opp_v_overall);
// Note that this follows from the arithmetic definition of the thing.
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
// endpt1 is defined in this way, so its motion must be defined accordingly.
// The v_overall of the below-insulator point is actually 0.
f64 r3 = nextpos.modulus();
v_overall1 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r1 - r3))*v_overall0;
// but has no radial component:
v_overall1 -= (v_overall1.dot(endpt1)) / (endpt1.dot(endpt1))*endpt1;
}
else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
f64 r3 = prevpos.modulus();
v_overall0 = ((DEVICE_RADIUS_INSULATOR_OUTER - r3) / (r2 - r3))*v_overall1;
// but has no radial component:
v_overall0 -= (v_overall0.dot(endpt0)) / (endpt0.dot(endpt0))*endpt0;
};
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// have not yet handled how to do momflux between two CROSSING_INS tris.
// the above vxy1 etc will be invalid because of taking data from insulator points.
// Does that mean we will get weird effects? Probably. Have to think here then.
// Reset relvnormal:
if (prev_v.z == 0.0) v0 = 0.5*(our_v + opp_v);
if (next_v.z == 0.0) v1 = 0.5*(our_v + opp_v);
if (n0 == 0.0) // generated from shardmodel from inside the insulator, then it should come out 0.
n0 = 0.5*(p_n_minor[iMinor].n_n + p_n_minor[izNeighMinor[i]].n_n);
if (n1 == 0.0)
n1 = 0.5*(p_n_minor[iMinor].n_n + p_n_minor[izNeighMinor[i]].n_n);
relvnormal = 0.5*((v0 + v1).xypart()
- v_overall0 - v_overall1).dot(edge_normal);
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
} else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
} else {
// Looking down into insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
}
else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
};
// will be a 0 contribution if endpt1 = endpt0, that's ok.
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// should be facing towards (0,0).
// Insulator arc isn't moving, no v_overall.
relvnormal = our_v.dotxy(edge_normal);
if (relvnormal > 0.0) {
f64 n_edge = p_n_minor[iMinor].n_n;
// Only the vr component is reversed!!!
// f64 vr = -our_v.vxy.dot(edge_normal) / edge_normal.modulus();
// rhat = -edge_normal/edge_normal.modulus();
// v-= vr rhat
f64_vec2 vr_rhat = edge_normal*((our_v.dotxy(edge_normal)) /
(edge_normal.dot(edge_normal)));
// positive amt * negative r vector = negative amt * positive r vector.
f64 vr_squared = our_v.dotxy(edge_normal)*our_v.dotxy(edge_normal) /
edge_normal.dot(edge_normal);
MAR_neut -= 2.0*relvnormal*n_edge*Make3(vr_rhat, 0.0);
// Now add heat:
// change in 0.5 Nvv = 0.5v d/dt(Nv) = vr*vr*n_edge*relvnormal since v dot vr rhat = vr^2
// change in 1.5 NT should cancel this.
NT_addition_tri[iMinor].NnTn += 0.6666666666667*m_n*vr_squared*n_edge*relvnormal;
};
// If we are pulling away from the ins, do nothing!
};
};
} else {
// Typical edge.
if (relvnormal > 0.0) {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(our_v);
} else {
MAR_neut -= 0.5*relvnormal* (n0 + n1) *(opp_v);
};
if (((TESTVNY2)))
printf("advectiveGPU %d i %d MAR_neut.y %1.12E contrib >0 %1.12E <0 %1.12E relvnormal %1.12E\n"
"n0 %1.12E n1 %1.12E v01.y %1.12E %1.12E vxyours.y %1.12E opp %1.12E\n"
"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n",
CHOSEN, i,
MAR_neut.y,
-0.5*relvnormal* (n0 + n1) *(our_v.y), -0.5*relvnormal* (n0 + n1) *opp_v.y,
relvnormal,
n0, n1, v0.y, v1.y, our_v.y, opp_v.y);
};
// Notice that we also conserved momentum while we were doing ionization changes, or that was the intention.
iprev = i;
endpt0 = endpt1;
prevpos = opppos;
prev_v = opp_v;
prev_v_overall = opp_v_overall;
opppos = nextpos;
opp_v = next_v;
opp_v_overall = next_v_overall;
};
memcpy(&(p_MAR_neut[iMinor]), &(MAR_neut), sizeof(f64_vec3));
} else {
// Not domain, not crossing_ins, not a frill
// ==========================================
} // non-domain tri
}; // was it FRILL
}
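// kernelAntiAdvect: compensate Az and Azdot for the motion of the mesh.
// For each minor cell we estimate Grad_Az and Grad_Azdot by Green's theorem
// (sum of edge-average value times edge normal, divided by AreaMinor) and then
// advance along the mesh velocity: A_dest = A + h_use * Grad_A dot v_overall.
// Cells that do not move simply copy A through unchanged.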
__global__ void kernelAntiAdvect(
f64 const h_use,
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighTriMinor,
char * __restrict__ p_szPBCtriminor,
AAdot * __restrict__ p_AAdot,
f64_vec2 * __restrict__ p_v_overall_minor,
AAdot * __restrict__ p_AAdot_dest
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ AAdot shared_AAdot[threadsPerTileMinor]; // 3+2+2+1=8 per thread
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
__shared__ AAdot shared_AAdot_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
structural info = p_info_minor[iMinor];
shared_pos[threadIdx.x] = info.pos;
shared_AAdot[threadIdx.x] = p_AAdot[iMinor];
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
shared_AAdot_verts[threadIdx.x] = p_AAdot[iVertex + BEGINNING_OF_CENTRAL];
};
__syncthreads();
AAdot oppA, prevA, nextA, ourA;
f64_vec2 opppos, prevpos, nextpos, Integ_grad_Az, Integ_grad_Azdot;
f64 AreaMinor;
if (threadIdx.x < threadsPerTileMajor)
{
if (info.flag == DOMAIN_VERTEX) {// otherwise no move
AreaMinor = 0.0;
Integ_grad_Az.x = 0.0;
Integ_grad_Az.y = 0.0;
Integ_grad_Azdot.x = 0.0;
Integ_grad_Azdot.y = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
ourA = shared_AAdot_verts[threadIdx.x];
short iprev = tri_len - 1;
short i = 0;
short inext;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevA = shared_AAdot[izTri[iprev] - StartMinor];
prevpos = shared_pos[izTri[iprev] - StartMinor];
}
else {
prevA = p_AAdot[izTri[iprev]];
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
}
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
}
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
oppA = shared_AAdot[izTri[i] - StartMinor];
opppos = shared_pos[izTri[i] - StartMinor];
}
else {
oppA = p_AAdot[izTri[i]];
opppos = p_info_minor[izTri[i]].pos;
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
// Think carefully: DOMAIN vertex cases for n,T ...
f64_vec2 endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 endpt1, edge_normal;
short iend = tri_len;
// we said DOMAIN_VERTEX
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextA = shared_AAdot[izTri[inext] - StartMinor];
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextA = p_AAdot[izTri[inext]];
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// ______________________________________________________-
// And we did what? We took n at centre of a triangle WITHIN this major cell
// But did not take upwind n ---- is that consistent for all advection?
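// Green's theorem: Integ_grad_Az accumulates (edge-average Az) * edge_normal over the
// cell boundary, so Integ_grad_Az / AreaMinor estimates Grad_Az on this minor cell.
// The edge average weights our cell and the opposite cell twice as heavily as the
// prev and next neighbours, hence the SIXTH*(2,2,1,1) combination.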
f64 Az_edge = SIXTH * (2.0*ourA.Az + 2.0*oppA.Az + prevA.Az + nextA.Az);
Integ_grad_Az += Az_edge*edge_normal;
f64 Azdot_edge = SIXTH * (2.0*ourA.Azdot + 2.0*oppA.Azdot + prevA.Azdot + nextA.Azdot);
Integ_grad_Azdot += Azdot_edge*edge_normal;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
prevA = oppA;
opppos = nextpos;
oppA = nextA;
}; // next i
f64_vec2 Grad_Az = Integ_grad_Az / AreaMinor;
f64_vec2 Grad_Azdot = Integ_grad_Azdot / AreaMinor;
AAdot AAdot_dest;
f64_vec2 v_overall = p_v_overall_minor[iVertex + BEGINNING_OF_CENTRAL];
AAdot_dest.Az = ourA.Az + h_use*Grad_Az.dot(v_overall);
AAdot_dest.Azdot = ourA.Azdot + h_use*Grad_Azdot.dot(v_overall);
// Why was this minus?
p_AAdot_dest[iVertex + BEGINNING_OF_CENTRAL] = AAdot_dest;
} else {
p_AAdot_dest[iVertex + BEGINNING_OF_CENTRAL] = shared_AAdot_verts[threadIdx.x];
};
};
// Now the per-triangle (minor) part:
info = p_info_minor[iMinor];
ourA = shared_AAdot[threadIdx.x];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighTriMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
AreaMinor = 0.0;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
Integ_grad_Az.x = 0.0;
Integ_grad_Az.y = 0.0;
Integ_grad_Azdot.x = 0.0;
Integ_grad_Azdot.y = 0.0;
short inext, iprev = 5, i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevA = shared_AAdot[izNeighMinor[iprev] - StartMinor];
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
prevA = shared_AAdot_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
prevA = p_AAdot[izNeighMinor[iprev]];
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) {
prevpos = Clockwise_d*prevpos;
};
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) {
prevpos = Anticlockwise_d*prevpos;
};
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
oppA = shared_AAdot[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
oppA = shared_AAdot_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
oppA = p_AAdot[izNeighMinor[i]];
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) {
opppos = Clockwise_d*opppos;
}
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) {
opppos = Anticlockwise_d*opppos;
}
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
nextA = shared_AAdot[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
nextA = shared_AAdot_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
nextA = p_AAdot[izNeighMinor[inext]];
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) {
nextpos = Clockwise_d*nextpos;
}
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) {
nextpos = Anticlockwise_d*nextpos;
}
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
f64 Az_edge = SIXTH * (2.0*ourA.Az + 2.0*oppA.Az + prevA.Az + nextA.Az);
Integ_grad_Az += Az_edge*edge_normal;
f64 Azdot_edge = SIXTH * (2.0*ourA.Azdot + 2.0*oppA.Azdot + prevA.Azdot + nextA.Azdot);
Integ_grad_Azdot += Azdot_edge*edge_normal;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevA = oppA;
prevpos = opppos;
oppA = nextA;
opppos = nextpos;
};
f64_vec2 Grad_Az = Integ_grad_Az / AreaMinor;
f64_vec2 Grad_Azdot = Integ_grad_Azdot / AreaMinor;
AAdot AAdot_dest;
f64_vec2 v_overall = p_v_overall_minor[iMinor];
AAdot_dest.Az = ourA.Az + h_use*Grad_Az.dot(v_overall);
AAdot_dest.Azdot = ourA.Azdot + h_use*Grad_Azdot.dot(v_overall);
// Why was this minus?
p_AAdot_dest[iMinor] = AAdot_dest;
} else {
p_AAdot_dest[iMinor] = shared_AAdot[threadIdx.x]; // no move
};
}
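// kernelGet_AreaMinorFluid: recompute AreaMinor for every minor cell, consistent with
// the polygons used by the fluid routines above. Vertex cells integrate x dy around the
// polygon of corner points THIRD*(own position + two successive triangle positions),
// with extra closing sides for INNERMOST / OUTERMOST; triangle cells integrate around
// their six minor neighbours, and CROSSING_INS triangles clamp edges to the insulator
// radius exactly as in the pressure and momflux kernels. Frills get a token area of 1.0e-12.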
__global__ void kernelGet_AreaMinorFluid(
structural * __restrict__ p_info_minor,
long * __restrict__ p_izTri,
char * __restrict__ p_szPBC,
long * __restrict__ p_izNeighMinor,
char * __restrict__ p_szPBCtriminor,
bool * __restrict__ bz_pressureflag,
f64 * __restrict__ p_AreaMinor
)
{
__shared__ f64_vec2 shared_pos[threadsPerTileMinor];
__shared__ f64_vec2 shared_pos_verts[threadsPerTileMajor];
long const iMinor = blockDim.x*blockIdx.x + threadIdx.x;
long const iVertex = threadsPerTileMajor*blockIdx.x + threadIdx.x; // only meaningful threadIdx.x < threadsPerTileMajor
long const StartMinor = threadsPerTileMinor*blockIdx.x;
long const StartMajor = threadsPerTileMajor*blockIdx.x;
long const EndMinor = StartMinor + threadsPerTileMinor;
long const EndMajor = StartMajor + threadsPerTileMajor;
shared_pos[threadIdx.x] = p_info_minor[iMinor].pos;
structural info;
if (threadIdx.x < threadsPerTileMajor) {
info = p_info_minor[iVertex + BEGINNING_OF_CENTRAL];
shared_pos_verts[threadIdx.x] = info.pos;
};
__syncthreads();
f64_vec2 opppos, prevpos, nextpos;
if (threadIdx.x < threadsPerTileMajor) {
f64 AreaMinor = 0.0;
long izTri[MAXNEIGH];
char szPBC[MAXNEIGH];
short tri_len = info.neigh_len;
memcpy(izTri, p_izTri + iVertex*MAXNEIGH, MAXNEIGH * sizeof(long));
memcpy(szPBC, p_szPBC + iVertex*MAXNEIGH, MAXNEIGH * sizeof(char));
{
// Let's say we do for every vertex.
short iprev = tri_len - 1;
if ((izTri[iprev] >= StartMinor) && (izTri[iprev] < EndMinor))
{
prevpos = shared_pos[izTri[iprev] - StartMinor];
} else {
prevpos = p_info_minor[izTri[iprev]].pos;
}
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
short inext, i = 0;
if ((izTri[i] >= StartMinor) && (izTri[i] < EndMinor))
{
opppos = shared_pos[izTri[i] - StartMinor];
} else {
opppos = p_info_minor[izTri[i]].pos;
}
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
// Think carefully: DOMAIN vertex cases for n,T ...
f64_vec2 endpt1, endpt0 = THIRD * (info.pos + opppos + prevpos);
f64_vec2 store_first_point = endpt0;
short iend = tri_len;
f64_vec2 projendpt0, edge_normal;
if ((info.flag == INNERMOST) || (info.flag == OUTERMOST)) iend = tri_len - 2;
// Bear in mind for OUTERMOST, the triangles go clockwise not anticlockwise.
if ((info.flag == INNERMOST)) {
endpt0.project_to_radius(projendpt0, FRILL_CENTROID_INNER_RADIUS_d); // back of cell for Lap purposes
edge_normal.x = endpt0.y - projendpt0.y;
edge_normal.y = projendpt0.x - endpt0.x;
AreaMinor += (0.5*projendpt0.x + 0.5*endpt0.x)*edge_normal.x;
};
for (i = 0; i < iend; i++)
{
// Tri 0 is anticlockwise of neighbour 0, we think
inext = i + 1; if (inext >= tri_len) inext = 0;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
} else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
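// Area via Green's theorem: the contribution below is the average x on this edge
// times the edge's dy (edge_normal.x), i.e. one segment of the contour integral of x dy.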
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
}; // next i
if (info.flag == INNERMOST) {
// Now add on the final sides to give area:
f64_vec2 projendpt1;
endpt1.project_to_radius(projendpt1, FRILL_CENTROID_INNER_RADIUS_d);
edge_normal.x = projendpt1.y - endpt1.y;
edge_normal.y = endpt1.x - projendpt1.x;
AreaMinor += (0.5*projendpt1.x + 0.5*endpt1.x)*edge_normal.x;
edge_normal.x = projendpt0.y - projendpt1.y;
edge_normal.y = projendpt1.x - projendpt0.x;
AreaMinor += (0.5*projendpt1.x + 0.5*projendpt0.x)*edge_normal.x;
// unchanged... check later
}
if (info.flag == OUTERMOST)
{
// 3 sides to add.
// 3 4
// 2 0
// 1
// endpt0=endpt1 is now the point north of edge facing 2.
// opppos is centre of tri (3).
info.pos.project_to_radius(nextpos, FRILL_CENTROID_OUTER_RADIUS_d);
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc:
f64 radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
inext = tri_len - 1;
if ((izTri[inext] >= StartMinor) && (izTri[inext] < EndMinor))
{
nextpos = shared_pos[izTri[inext] - StartMinor];
}
else {
nextpos = p_info_minor[izTri[inext]].pos;
}
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
endpt1 = THIRD*(opppos + info.pos + nextpos);
// map radially inwards so that radius is halfway out to the zero arc.
radiusnow = endpt1.modulus();
endpt1 *= ((0.5*(info.pos.modulus() + FRILL_CENTROID_OUTER_RADIUS_d)) / radiusnow);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
// That was the side looking out.
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
endpt1 = store_first_point;
nextpos = p_info_minor[izTri[0]].pos;
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
// As with our other points, edge_normal points inwards because 1 is clockwise of 0.
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
};
p_AreaMinor[iVertex + BEGINNING_OF_CENTRAL] = AreaMinor;
};
};// if (threadIdx.x < threadsPerTileMajor)
// This branching is itself a good argument for doing Az in ITS own separate routine with no need for n_shard.
// __syncthreads(); // end of first vertex part
// Do we need syncthreads? Not overwriting any shared data here...
// Now the per-triangle (minor) part:
info = p_info_minor[iMinor];
long izNeighMinor[6];
char szPBC[6];
memcpy(izNeighMinor, p_izNeighMinor + iMinor * 6, sizeof(long) * 6);
memcpy(szPBC, p_szPBCtriminor + iMinor * 6, sizeof(char) * 6);
if ((info.flag == OUTER_FRILL) || (info.flag == INNER_FRILL)) {
p_AreaMinor[iMinor] = 1.0e-12;
}
else {
f64 AreaMinor = 0.0;
short iprev, inext, i;
if ((info.flag == DOMAIN_TRIANGLE) || (info.flag == CROSSING_INS)) {
iprev = 5;
i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
// if (iMinor == CHOSEN) printf("%d : endpt %1.8E %1.8E | %1.8E %1.8E ;\n ",
// iMinor, endpt0.x, endpt0.y, endpt1.x, endpt1.y);
// To get integral grad we add the averages along the edges times edge_normals
// if (iMinor == CHOSEN) printf("%d : %d opppos %1.8E %1.8E \n",
// iMinor, izNeighMinor[i], opppos.x, opppos.y);
if (info.flag == CROSSING_INS) {
char flag = p_info_minor[izNeighMinor[i]].flag;
if ((flag == DOMAIN_TRIANGLE) || (flag == DOMAIN_VERTEX))
{
} else {
// Looking into the insulator we see a reflection of nT. Here we look into an out-of-domain tri or vert below ins.
// Or allowed a below-ins value to affect something anyway.
if (flag == CROSSING_INS) {
// make the edge go from the upper point, down to the insulator.
// endpt0 = THIRD * (prevpos + info.pos + opppos);
// endpt1 = THIRD * (nextpos + info.pos + opppos);
// edge_normal.x = endpt1.y - endpt0.y;
// edge_normal.y = endpt0.x - endpt1.x;
// Basically radius changes almost linearly as we move from endpt1 to endpt0.
f64 r1 = endpt0.modulus();
f64 r2 = endpt1.modulus();
if (r1 > r2) {
// 0 is higher
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(endpt0 - endpt1);
endpt1 = point; // use this value again later for AreaMinor if nothing else
} else {
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r1) / (r2 - r1))*(endpt1 - endpt0);
endpt0 = point;
};
} else {
// looking out the bottom of the insulator triangle at a within-insulator vertex or triangle.
// so we want to project the point up to the insulator.
// Use prevpos, nextpos to determine what we are looking at? Can't. need flags.
char prevflag = p_info_minor[izNeighMinor[iprev]].flag;
char nextflag = p_info_minor[izNeighMinor[inext]].flag;
// Let's distinguish several cases:
if (prevflag == CROSSING_INS)
{
// endpt0 is THIRD * (prevpos + info.pos + opppos)
// move towards the position that is 2 previous --- ie the vertex above.
// (Don't forget PBC.)
int iprevprev = iprev - 1; if (iprevprev < 0) iprevprev = 5;
f64_vec2 prevprevpos = p_info_minor[izNeighMinor[iprevprev]].pos;
if (szPBC[iprevprev] == ROTATE_ME_CLOCKWISE) prevprevpos = Clockwise_d*prevprevpos;
if (szPBC[iprevprev] == ROTATE_ME_ANTICLOCKWISE) prevprevpos = Anticlockwise_d*prevprevpos;
f64 r1 = prevprevpos.modulus();
f64 r2 = endpt0.modulus();
f64_vec2 point = endpt0 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(prevprevpos - endpt0);
endpt0 = point;
} else {
// prevflag will say it is below ins.
// Safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt0, DEVICE_RADIUS_INSULATOR_OUTER);
};
if (nextflag == CROSSING_INS)
{
// We still want to move towards vertex above. But now it's 2 next
// Don't forget PBC
int inextnext = inext + 1; if (inextnext == 6) inextnext = 0;
f64_vec2 nextnextpos = p_info_minor[izNeighMinor[inextnext]].pos;
if (szPBC[inextnext] == ROTATE_ME_CLOCKWISE) nextnextpos = Clockwise_d*nextnextpos;
if (szPBC[inextnext] == ROTATE_ME_ANTICLOCKWISE) nextnextpos = Anticlockwise_d*nextnextpos;
f64 r1 = nextnextpos.modulus();
f64 r2 = endpt1.modulus();
f64_vec2 point = endpt1 + ((DEVICE_RADIUS_INSULATOR_OUTER - r2) / (r1 - r2))*(nextnextpos - endpt1);
endpt1 = point;
}
else {
// safest way: put point at the projection of our own position to insulator, maybe slightly off.
info.pos.project_to_radius(endpt1, DEVICE_RADIUS_INSULATOR_OUTER);
}
// will be a 0 contribution if endpt1 = endpt0, that's ok.
};
}; // domain triangle opposite or not
} else {
};
// if (iMinor == CHOSEN) printf(" endpt %1.8E %1.8E | %1.8E %1.8E ; \n",
// endpt0.x, endpt0.y, endpt1.x, endpt1.y);
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*(endpt1.y - endpt0.y);
// See a way that FP accuracy was eroded: we take a difference of two close things already to get edge_normal.
// can that be cleverly avoided? For all calcs?
endpt0 = endpt1;
iprev = i;
prevpos = opppos;
opppos = nextpos;
};
// No setting a_r = 0
p_AreaMinor[iMinor] = AreaMinor;
}
else {
// Not domain, not crossing_ins, not a frill
// ==========================================
iprev = 5; i = 0;
if ((izNeighMinor[iprev] >= StartMinor) && (izNeighMinor[iprev] < EndMinor))
{
prevpos = shared_pos[izNeighMinor[iprev] - StartMinor];
}
else {
if ((izNeighMinor[iprev] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[iprev] < EndMajor + BEGINNING_OF_CENTRAL))
{
prevpos = shared_pos_verts[izNeighMinor[iprev] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
prevpos = p_info_minor[izNeighMinor[iprev]].pos;
};
};
if (szPBC[iprev] == ROTATE_ME_CLOCKWISE) prevpos = Clockwise_d*prevpos;
if (szPBC[iprev] == ROTATE_ME_ANTICLOCKWISE) prevpos = Anticlockwise_d*prevpos;
i = 0;
if ((izNeighMinor[i] >= StartMinor) && (izNeighMinor[i] < EndMinor))
{
opppos = shared_pos[izNeighMinor[i] - StartMinor];
}
else {
if ((izNeighMinor[i] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[i] < EndMajor + BEGINNING_OF_CENTRAL))
{
opppos = shared_pos_verts[izNeighMinor[i] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
opppos = p_info_minor[izNeighMinor[i]].pos;
};
};
if (szPBC[i] == ROTATE_ME_CLOCKWISE) opppos = Clockwise_d*opppos;
if (szPBC[i] == ROTATE_ME_ANTICLOCKWISE) opppos = Anticlockwise_d*opppos;
#pragma unroll
for (i = 0; i < 6; i++)
{
inext = i + 1; if (inext > 5) inext = 0;
if ((izNeighMinor[inext] >= StartMinor) && (izNeighMinor[inext] < EndMinor))
{
nextpos = shared_pos[izNeighMinor[inext] - StartMinor];
}
else {
if ((izNeighMinor[inext] >= StartMajor + BEGINNING_OF_CENTRAL) &&
(izNeighMinor[inext] < EndMajor + BEGINNING_OF_CENTRAL))
{
nextpos = shared_pos_verts[izNeighMinor[inext] - BEGINNING_OF_CENTRAL - StartMajor];
}
else {
nextpos = p_info_minor[izNeighMinor[inext]].pos;
};
};
if (szPBC[inext] == ROTATE_ME_CLOCKWISE) nextpos = Clockwise_d*nextpos;
if (szPBC[inext] == ROTATE_ME_ANTICLOCKWISE) nextpos = Anticlockwise_d*nextpos;
// New definition of endpoint of minor edge:
f64_vec2 endpt0, endpt1, edge_normal, integ_grad_Az;
endpt0 = THIRD * (prevpos + info.pos + opppos);
endpt1 = THIRD * (nextpos + info.pos + opppos);
edge_normal.x = endpt1.y - endpt0.y;
edge_normal.y = endpt0.x - endpt1.x;
AreaMinor += (0.5*endpt0.x + 0.5*endpt1.x)*edge_normal.x;
endpt0 = endpt1;
prevpos = opppos;
opppos = nextpos;
};
p_AreaMinor[iMinor] = AreaMinor;
} // non-domain tri
}; // was it FRILL
}
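// kernelAccumulateSummandsVisc: per-block partial sums for the regression-based solver.
// Launched with threadsPerTileMinor/4 threads per block; each thread covers four minors,
// strided by threadsPerTileMinor/4 within the tile, accumulating eps dot (d eps / d beta_i)
// for each regressor i and the REGRESSORS x REGRESSORS matrix of
// (d eps / d beta_i) dot (d eps / d beta_j). A shared-memory tree reduction then leaves
// one sum vector and one product matrix per block in the output arrays.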
__global__ void kernelAccumulateSummandsVisc(
f64_vec2 * __restrict__ p_eps_xy, //
f64 * __restrict__ p_eps_iz,
f64 * __restrict__ p_eps_ez,
f64_vec2 * __restrict__ p_d_epsxy_by_d_beta, // f64_vec2
f64 * __restrict__ p_d_eps_iz_by_d_beta,
f64 * __restrict__ p_d_eps_ez_by_d_beta,
// outputs:
f64 * __restrict__ p_sum_eps_deps_, // 8 values for this block
f64 * __restrict__ p_sum_product_matrix_
)
{
__shared__ f64 sumdata_eps_deps[threadsPerTileMinor / 4][REGRESSORS];
__shared__ f64 sum_product[threadsPerTileMinor / 4][REGRESSORS][REGRESSORS];
// Call with threadsPerTileMinor/4
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
f64_vec2 depsbydbeta2[REGRESSORS];
f64 depsbydbeta[REGRESSORS], depsbydbeta_e[REGRESSORS];
f64_vec2 eps_xy;
f64 eps_iz, eps_ez;
int i, j;
memset(&(sumdata_eps_deps[threadIdx.x]), 0, sizeof(f64)*REGRESSORS);
memset(&(sum_product[threadIdx.x]), 0, sizeof(f64)*REGRESSORS*REGRESSORS);
eps_xy = p_eps_xy[iMinor];
eps_iz = p_eps_iz[iMinor];
eps_ez = p_eps_ez[iMinor];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] = depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i]*eps_iz + depsbydbeta_e[i]*eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] = depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + threadsPerTileMinor / 4];
eps_iz = p_eps_iz[iMinor + threadsPerTileMinor / 4];
eps_ez = p_eps_ez[iMinor + threadsPerTileMinor / 4];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + threadsPerTileMinor / 4 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + threadsPerTileMinor / 2];
eps_iz = p_eps_iz[iMinor + threadsPerTileMinor / 2];
eps_ez = p_eps_ez[iMinor + threadsPerTileMinor / 2];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + threadsPerTileMinor / 2 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
eps_xy = p_eps_xy[iMinor + 3 * threadsPerTileMinor / 4];
eps_iz = p_eps_iz[iMinor + 3 * threadsPerTileMinor / 4];
eps_ez = p_eps_ez[iMinor + 3 * threadsPerTileMinor / 4];
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
depsbydbeta2[i] = p_d_epsxy_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta[i] = p_d_eps_iz_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
depsbydbeta_e[i] = p_d_eps_ez_by_d_beta[iMinor + 3 * threadsPerTileMinor / 4 + i*NMINOR];
};
#pragma unroll
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += depsbydbeta2[i].dot(eps_xy)
+ depsbydbeta[i] * eps_iz + depsbydbeta_e[i] * eps_ez;
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += depsbydbeta2[i].dot(depsbydbeta2[j])
+ depsbydbeta[i] * depsbydbeta[j] + depsbydbeta_e[i] * depsbydbeta_e[j];
};
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += sumdata_eps_deps[threadIdx.x + k][i];
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += sum_product[threadIdx.x + k][i][j];
};
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
for (i = 0; i < REGRESSORS; i++)
{
sumdata_eps_deps[threadIdx.x][i] += sumdata_eps_deps[s - 1][i];
for (j = 0; j < REGRESSORS; j++)
sum_product[threadIdx.x][i][j] += sum_product[s - 1][i][j];
};
};
// In case s == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
memcpy(&(p_sum_eps_deps_[blockIdx.x*REGRESSORS]), &(sumdata_eps_deps[0][0]), sizeof(f64)*REGRESSORS);
memcpy(&(p_sum_product_matrix_[blockIdx.x*REGRESSORS*REGRESSORS]), &(sum_product[0][0][0]), sizeof(f64)*REGRESSORS*REGRESSORS);
};
}
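/* Hedged host-side sketch (not part of the original source): after this kernel, p_sum_eps_deps_
   holds REGRESSORS partial sums per block and p_sum_product_matrix_ holds a REGRESSORS x REGRESSORS
   partial matrix per block. A typical follow-up is to copy both back, accumulate over blocks, and
   solve the small normal-equation system for the regression coefficients. The names numTilesMinor,
   h_sum_eps_deps, h_sum_product, beta and SolveSmallSystem below are assumptions for illustration only.

   f64 rhs[REGRESSORS] = { 0.0 };
   f64 mat[REGRESSORS*REGRESSORS] = { 0.0 };
   for (int b = 0; b < numTilesMinor; b++)
       for (int i = 0; i < REGRESSORS; i++) {
           rhs[i] += h_sum_eps_deps[b*REGRESSORS + i];
           for (int j = 0; j < REGRESSORS; j++)
               mat[i*REGRESSORS + j] += h_sum_product[(b*REGRESSORS + i)*REGRESSORS + j];
       }
   SolveSmallSystem(mat, rhs, beta); // e.g. Gaussian elimination on the host
*/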
__global__ void SplitVector4(
f64_vec2 * __restrict__ p_xy,
f64 * __restrict__ p_z1,
f64 * __restrict__ p_z2,
v4 * __restrict__ p_v4,
int * __restrict__ p_Select
)
{
long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
v4 temp;
if (p_Select[iMinor] == 0) {
memset(&temp, 0, sizeof(v4));
} else {
temp = p_v4[iMinor];
}
p_xy[iMinor] = temp.vxy;
p_z1[iMinor] = temp.viz;
p_z2[iMinor] = temp.vez;
}
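// Hedged launch sketch (illustration only; grid/block and argument names are assumptions):
//   SplitVector4 <<< numTilesMinor, threadsPerTileMinor >>> (p_vxy, p_viz, p_vez, p_v, p_Select);
// Entries with p_Select == 0 are written out as zero; selected entries are copied component-wise.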
__global__ void kernelAccumulateSummandsProduct(
f64_vec2 * __restrict__ p_eps_xy, //
f64 * __restrict__ p_eps_iz,
f64 * __restrict__ p_eps_ez,
f64_vec2 * __restrict__ p_d_epsxy_by_d_beta, // f64_vec2
f64 * __restrict__ p_d_eps_iz_by_d_beta,
f64 * __restrict__ p_d_eps_ez_by_d_beta,
// outputs:
f64 * __restrict__ p_sum_eps_deps_
)
{
__shared__ f64 sumdata_eps_deps[threadsPerTileMinor];
long const iMinor = threadIdx.x + blockIdx.x * threadsPerTileMinor;
f64_vec2 depsbydbeta2;
f64 depsbydbeta, depsbydbeta_e;
f64_vec2 eps_xy;
f64 eps_iz, eps_ez;
int i, j;
sumdata_eps_deps[threadIdx.x] = 0.0;
//memset(&(sumdata_eps_deps[threadIdx.x]), 0, sizeof(f64)*REGRESSORS);
eps_xy = p_eps_xy[iMinor];
eps_iz = p_eps_iz[iMinor];
eps_ez = p_eps_ez[iMinor];
depsbydbeta2 = p_d_epsxy_by_d_beta[iMinor];
depsbydbeta = p_d_eps_iz_by_d_beta[iMinor];
depsbydbeta_e = p_d_eps_ez_by_d_beta[iMinor];
sumdata_eps_deps[threadIdx.x] = depsbydbeta2.dot(eps_xy)
+ depsbydbeta * eps_iz + depsbydbeta_e * eps_ez;
__syncthreads();
int s = blockDim.x;
int k = s / 2;
while (s != 1) {
if (threadIdx.x < k)
{
sumdata_eps_deps[threadIdx.x] += sumdata_eps_deps[threadIdx.x + k];
};
__syncthreads();
// Modify for case blockdim not 2^n:
if ((s % 2 == 1) && (threadIdx.x == k - 1)) {
sumdata_eps_deps[threadIdx.x] += sumdata_eps_deps[s - 1];
};
// In case s == 81, add [39] += [80]
// Otherwise we only get to 39+40=79.
s = k;
k = s / 2;
__syncthreads();
};
if (threadIdx.x == 0)
{
p_sum_eps_deps_[blockIdx.x] = sumdata_eps_deps[0];
};
}
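/* Hedged follow-up sketch (not in the original): each block leaves one scalar in
   p_sum_eps_deps_[blockIdx.x]; the host (or a second small kernel) would then sum these
   values over all blocks to obtain the full dot product of eps with d_eps/d_beta,
   e.g. for a line-search coefficient. numTilesMinor and h_sum_eps_deps are assumed names.

   f64 total = 0.0;
   for (int b = 0; b < numTilesMinor; b++) total += h_sum_eps_deps[b];
*/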
|
d3e706bfa7f8ab0055088d48077a15255aa719ce.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// These headers should not be added since NVCC takes care, but
// for VSCode Intellisense we need them so it recognizes CUDA functions
//
#ifdef __INTELLISENSE__
#include <hip/hip_runtime.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/device_functions.h>
#include <hip/hip_runtime_api.h>
#endif
#include <omp.h>
#include <vector>
#include <cmath>
#include <iostream>
// For Single Precision
using cu_prec = float;
#define cu_cos cosf
#define N_REP 200
// // For Double Precision
// using cu_prec = double;
// #define cu_cos cos
// CUDA Kernel
__global__ void fill_vec(cu_prec *f, cu_prec *x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// If the thread index is beyond the problem size, do nothing
if (i >= n)
return;
cu_prec x_local = x[i]; // Read from global memory
cu_prec f_local = (cu_prec)0.0;
#pragma unroll
for (int j = 0; j < N_REP; j++)
f_local += cu_cos(j * x_local) / (j + 1);
f[i] = f_local; // Fill the output in global memory
}
int main(void)
{
int n = 10000000;
double t1, t2;
std::vector<cu_prec> f(n, 0.0), h_f(n), h_x(n);
for (int i = 0; i < n; i++)
h_x[i] = (cu_prec)i;
t1 = omp_get_wtime();
for (int i = 0; i < n; i++)
{
for (int j = 0; j < N_REP; j++)
f[i] += std::cos(j * h_x[i]) / (j + 1);
}
t2 = omp_get_wtime();
std::cout << "Time CPU = " << t2 - t1 << " sec" << std::endl;
// Execute the CUDA Implementation
t1 = omp_get_wtime();
cu_prec *d_f, *d_x; // Functions on the device
int bytes = n * sizeof(cu_prec); // Size in memory of the arrays
hipMalloc(&d_f, bytes);
hipMalloc(&d_x, bytes);
hipMemcpy(d_x, &(h_x[0]), bytes, hipMemcpyHostToDevice);
int n_threads_per_block = 32;
int n_blocks = ::ceil(((double)n) / ((double)n_threads_per_block));
hipLaunchKernelGGL(( fill_vec), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_f, d_x, n);
hipMemcpy(&(h_f[0]), d_f, bytes, hipMemcpyDeviceToHost);
t2 = omp_get_wtime();
std::cout << "Time GPU = " << t2 - t1 << " sec" << std::endl;
hipFree(d_f);
hipFree(d_x);
//
// Check for the error
//
cu_prec max_err;
for (int i = 0; i < n; i++)
{
cu_prec err_tmp = std::abs(h_f[i] - f[i]);
if (i == 0 || (err_tmp > max_err))
max_err = err_tmp;
}
std::cout << "Error = " << max_err << std::endl;
return 0;
}
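// Hedged note (added commentary, not part of the original): the GPU timing above also includes
// hipMalloc and both hipMemcpy transfers. To time the kernel alone one could bracket the launch
// with HIP events, for example:
//   hipEvent_t ev0, ev1;
//   hipEventCreate(&ev0); hipEventCreate(&ev1);
//   hipEventRecord(ev0, 0);
//   hipLaunchKernelGGL(( fill_vec), dim3(n_blocks), dim3(n_threads_per_block), 0, 0, d_f, d_x, n);
//   hipEventRecord(ev1, 0); hipEventSynchronize(ev1);
//   float ms = 0.f; hipEventElapsedTime(&ms, ev0, ev1);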
|
d3e706bfa7f8ab0055088d48077a15255aa719ce.cu
|
//
// These headers should not be added since NVCC takes care, but
// for VSCode Intellisense we need them so it recognizes CUDA functions
//
#ifdef __INTELLISENSE__
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <cuda_runtime_api.h>
#endif
#include <omp.h>
#include <vector>
#include <cmath>
#include <iostream>
// For Single Precision
using cu_prec = float;
#define cu_cos cosf
#define N_REP 200
// // For Double Precision
// using cu_prec = double;
// #define cu_cos cos
// CUDA Kernel
__global__ void fill_vec(cu_prec *f, cu_prec *x, int n)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
// If the thread index is beyond the problem size, do nothing
if (i >= n)
return;
cu_prec x_local = x[i]; // Read from global memory
cu_prec f_local = (cu_prec)0.0;
#pragma unroll
for (int j = 0; j < N_REP; j++)
f_local += cu_cos(j * x_local) / (j + 1);
f[i] = f_local; // Fill the output in global memory
}
int main(void)
{
int n = 10000000;
double t1, t2;
std::vector<cu_prec> f(n, 0.0), h_f(n), h_x(n);
for (int i = 0; i < n; i++)
h_x[i] = (cu_prec)i;
t1 = omp_get_wtime();
for (int i = 0; i < n; i++)
{
for (int j = 0; j < N_REP; j++)
f[i] += std::cos(j * h_x[i]) / (j + 1);
}
t2 = omp_get_wtime();
std::cout << "Time CPU = " << t2 - t1 << " sec" << std::endl;
// Execute the CUDA Implementation
t1 = omp_get_wtime();
cu_prec *d_f, *d_x; // Functions on the device
int bytes = n * sizeof(cu_prec); // Size in memory of the arrays
cudaMalloc(&d_f, bytes);
cudaMalloc(&d_x, bytes);
cudaMemcpy(d_x, &(h_x[0]), bytes, cudaMemcpyHostToDevice);
int n_threads_per_block = 32;
int n_blocks = std::ceil(((double)n) / ((double)n_threads_per_block));
fill_vec<<<n_blocks, n_threads_per_block>>>(d_f, d_x, n);
cudaMemcpy(&(h_f[0]), d_f, bytes, cudaMemcpyDeviceToHost);
t2 = omp_get_wtime();
std::cout << "Time GPU = " << t2 - t1 << " sec" << std::endl;
cudaFree(d_f);
cudaFree(d_x);
//
// Check for the error
//
cu_prec max_err;
for (int i = 0; i < n; i++)
{
cu_prec err_tmp = std::abs(h_f[i] - f[i]);
if (i == 0 || (err_tmp > max_err))
max_err = err_tmp;
}
std::cout << "Error = " << max_err << std::endl;
return 0;
}
|
7f3454cc767bbd8f1e2d66479f8f7f434d11e977.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scalers/binormalization.h>
#include <sm_utils.inl>
#include <thrust/inner_product.h>
#include <solvers/block_common_solver.h>
#include <thrust_wrapper.h>
namespace amgx
{
template<class TConfig> class BinormalizationScaler;
/**********************************************************************
* HOST FUNCTIONS
*********************************************************************/
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *x, VectorType *y, VectorType *beta, VectorType *gamma)
{
for (int i = 0; i < rows; i++) { gamma[i] = 0.; }
for (int i = 0; i < rows; i++)
{
VectorType bi = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
bi += (val * val) * y[col];
gamma[col] += (val * val) * x[i];
}
beta[i] = bi;
}
}
// compute Gamma on its own
template <typename IndexType, typename MatrixType, typename VectorType>
void computeGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *x, VectorType *gamma)
{
for (int i = 0; i < rows; i++) { gamma[i] = 0.; }
for (int i = 0; i < rows; i++)
{
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
gamma[col] += (val * val) * x[i];
}
}
}
// compute Beta on its own
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaHost(int nrows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *y, VectorType *beta)
{
for (int i = 0; i < nrows; i++)
{
VectorType bi = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
bi += (val * val) * y[col];
}
beta[i] = bi;
}
}
template <typename IndexType, typename MatrixType, typename VectorType>
void scaleMatrixHost(int nrows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *x, VectorType *y)
{
for (int i = 0; i < nrows; i++)
{
VectorType fi = sqrt(fabs(x[i]));
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType gj = sqrt(fabs(y[j]));
values[jj] *= fi * gj;
}
}
}
/**********************************************************************
* DEVICE FUNCTIONS
*********************************************************************/
// compute initial beta, which is B*[1,...,1]'
template <typename IndexType, typename MatrixValue, typename VectorValue>
__global__
void computeBetaIniDevice(int nrows, IndexType *offsets, IndexType *indices, MatrixValue *values, VectorValue *beta)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
VectorValue rowsum = 0.0;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
rowsum += values[jj] * values[jj];
}
beta[i] = rowsum;
}
}
template <typename IndexType, typename MatrixType, typename VectorType>
__global__
void grabDiagonalVector(int nrows, IndexType *offsets, IndexType *indices, MatrixType *values, VectorType *diag)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
IndexType j = indices[jj];
if (i == j) { diag[i] = values[jj]; }
}
}
}
// functor to generate stddev of vectors
template <typename T>
struct std_f
{
std_f(T x) : v(x) {};
T v;
__host__ __device__
T operator()(const T &x1, const T &x2) const
{
return (x1 * x2 - v) * (x1 * x2 - v);
}
};
// scale the matrix using diag(F)*A*diag(G), with f = sqrt(fabs(x)), g = sqrt(fabs(y))
template <typename IndexType, typename MatrixType, typename VectorType>
__global__
void scaleMatrixDevice(int rows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *x)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
VectorType fi = fabs(x[i]);
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType fj = fabs(x[j]);
values[jj] *= sqrt(fabs(fi * fj));
}
}
}
template <typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void getColRowNorms(int rows, IndexType *offsets, IndexType *indices, ValueTypeA *values,
ValueTypeB *rownorms, ValueTypeB *colnorms)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
ValueTypeB rownorm = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
ValueTypeB curval = values[jj] * values[jj];
rownorm += curval;
utils::atomic_add(colnorms + j, curval);
}
rownorms[i] = rownorm;
}
}
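// Added descriptive note: this kernel accumulates the squared row sums of A into rownorms and,
// via atomics, the squared column sums into colnorms, i.e. the row/column sums of B = A.^2.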
// these warp reductions should be able to be replaced with amgx:: functions
template <int warpSize, typename T>
__device__ __inline__ T warpReduceSum(T val)
{
if (warpSize > 16) { val += utils::shfl_down(val, 16, warpSize); }
if (warpSize > 8) { val += utils::shfl_down(val, 8, warpSize); }
if (warpSize > 4) { val += utils::shfl_down(val, 4, warpSize); }
if (warpSize > 2) { val += utils::shfl_down(val, 2, warpSize); }
if (warpSize > 1) { val += utils::shfl_down(val, 1, warpSize); }
return val;
}
template <int warpSize, typename T>
__device__ T warpReduceSumShared(volatile T *vals, const int lane_id)
{
if (warpSize > 16) { vals[lane_id] += vals[lane_id + 16]; }
if (warpSize > 8) { vals[lane_id] += vals[lane_id + 8]; }
if (warpSize > 4) { vals[lane_id] += vals[lane_id + 4]; }
if (warpSize > 2) { vals[lane_id] += vals[lane_id + 2]; }
if (warpSize > 1) { vals[lane_id] += vals[lane_id + 1]; }
return vals[lane_id];
}
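/* Hedged usage sketch (illustration only): each thread passes its partial value through the
   shuffle reduction and lane 0 of every warp then holds that warp's total, e.g.
     double partial = ...;                         // per-thread contribution
     double warp_total = warpReduceSum<32>(partial);
     if ((threadIdx.x & 31) == 0) { out[warp_id] = warp_total; }  // out, warp_id assumed names
*/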
// compute gamma = B^T*x (B = A.^2)
template <typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void computeBetaDevice(const int nrows, IndexType *offsets, IndexType *indices, ValueTypeA *values,
ValueTypeB *diag, ValueTypeB *x, ValueTypeB *xn, ValueTypeB *beta, const ValueTypeB avg, ValueTypeB *avg_vec)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
ValueTypeB xi = x[i];
ValueTypeB bi = beta[i];
ValueTypeB di = diag[i];
ValueTypeB c0 = -di * xi * xi + 2 * bi * xi - nrows * avg;
ValueTypeB c1 = (nrows - 2) * (bi - di * xi);
ValueTypeB c2 = (nrows - 1) * di;
assert(c0 > epsilon(c0)); //
// delta = xi - x(i)
ValueTypeB dx = (2 * c0) / (-c1 - sqrt(c1 * c1 - 4 * c2 * c0)) - x[i];
ValueTypeB davg = 0.;
ValueTypeB dbeta = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
ValueTypeA Bij = values[jj] * values[jj];
davg += Bij * x[j]; // += x' * B(:, i) == B(i, :)*x, because B is symmetric
dbeta += dx * Bij;
}
beta[i] = bi + dbeta;
avg_vec[i] = dx * (davg + bi + di * dx) / nrows;
//utils::atomic_add(avg, davg);
}
}
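/* Added commentary (not in the original): with c2 = (nrows-1)*di, c1 = (nrows-2)*(bi - di*xi)
   and c0 as computed above, the expression 2*c0 / (-c1 - sqrt(c1*c1 - 4*c2*c0)) is a root of
   c2*t^2 + c1*t + c0 = 0 written in the alternative ("citardauq") form, algebraically equal to
   (-c1 + sqrt(c1*c1 - 4*c2*c0)) / (2*c2); dx is then that root minus the current x[i], and it is
   propagated into beta and into the per-row contribution to the running average. */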
template<typename T>
struct square_value : public unary_function<T, T>
{
__host__ __device__ T operator()(const T &x) const
{
return x * x;
}
};
// vector constant scale operand
template <typename T>
struct vmul_scale_const
{
T _alpha;
vmul_scale_const(T alpha): _alpha(alpha) {};
__host__ __device__
T operator()(const T &vec) const
{
return vec * _alpha;
}
};
// vector scale operand
template <typename T>
struct vmul_scale
{
vmul_scale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec * sqrt(fabs(alpha)));
}
};
// vector unscale operand
template <typename T>
struct vmul_unscale
{
vmul_unscale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec / sqrt(fabs(alpha)));
}
};
// Setup on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_d &A)
{
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 10;
const ValueTypeB tolerance = 1e-10;
VVector diag(A.get_num_rows());
hipLaunchKernelGGL(( grabDiagonalVector) , dim3(4096), dim3(128), 0, 0, A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), diag.raw());
int nrows = A.get_num_rows();
// temporary vectors
VVector x(nrows, 1), xn(nrows), davg(nrows), beta(nrows, 0);
hipLaunchKernelGGL(( computeBetaIniDevice) , dim3(4096), dim3(256), 0, 0, nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), beta.raw());
cudaCheckError();
ValueTypeB avg = thrust_wrapper::reduce(beta.begin(), beta.end()) / nrows;
// calculate initial std1 and std2
thrust::device_ptr<ValueTypeB> x_ptr(x.raw()), beta_ptr(beta.raw());
ValueTypeB stdx = sqrt(thrust::inner_product(x_ptr, x_ptr + nrows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(avg)) / nrows) / avg;
for (int t = 0; t < max_iters; t++)
{
if (fabs(stdx) < tolerance) { break; } // finished
hipLaunchKernelGGL(( computeBetaDevice) , dim3(4096), dim3(256), 0, 0, nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
diag.raw(), x.raw(), xn.raw(), beta.raw(), avg, davg.raw());
avg += thrust_wrapper::reduce(davg.begin(), davg.end());
// ValueTypeB stdx_old = stdx;
stdx = sqrt(thrust::inner_product(x_ptr, x_ptr + nrows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(avg)) / nrows) / avg;
// print it #, current error, convergence rate
// printf("ITER: %d %.3e %.3e %.4lg\n",t, stdx, stdx_old, stdx / stdx_old);
}
//Save scaling vectors for later use, setup complete
scale_vector = VVector(x);
}
// Matrix Scaling on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_d &A, ScaleDirection scaleOrUnscale)
{
if (scale_vector.size() != A.get_num_rows())
{
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
int nrows = A.get_num_rows();
/*VVector rownorms(nrows, 0.0);
VVector colnorms(nrows, 0.0);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
ValueTypeB row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
ValueTypeB row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
ValueTypeB col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
ValueTypeB col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Original Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
hipLaunchKernelGGL(( scaleMatrixDevice) , dim3(4096), dim3(256), 0, 0, nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), scale_vector.raw());
cudaCheckError();
ValueTypeB C_norm = sqrt(thrust::transform_reduce(A.values.begin(), A.values.begin() + A.get_num_nz() * A.get_block_size(), square_value<ValueTypeB>(), 0., thrust::plus<ValueTypeB>()) / nrows);
thrust::transform(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.begin(), vmul_scale_const<ValueTypeB>(1. / C_norm) );
thrust::transform(scale_vector.begin(), scale_vector.end(), scale_vector.begin(), vmul_scale_const<ValueTypeB>(sqrt(1. / C_norm)) );
cudaCheckError();
/*thrust::fill(rownorms.begin(), rownorms.end(), 0.);
thrust::fill(colnorms.begin(), colnorms.end(), 0.);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Scaled Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
exit(0);
}
// Setup on Host
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_h &A)
{
FatalError("Host not supported", AMGX_ERR_NOT_IMPLEMENTED);
/*if (A.is_matrix_distributed()) {
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 10;
const ValueTypeB tolerance = 1e-10;
int rows = A.get_num_rows(), cols = A.get_num_cols();
// temporary vectors
VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0);
// perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma)
computeBetaGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
x.raw(), y.raw(), beta.raw(), gamma.raw());
double std1 = 0., std2 = 0., sum1 = cols, sum2 = rows;
// calculate initial std1 and std2
for (int i=0; i<rows; i++) {
std1 += pow(x[i]*beta[i] - sum1, 2.0);
}
std1 = sqrt(std1 / rows) / sum1;
for (int i=0; i<cols; i++) {
std2 += pow(y[i]*gamma[i] - sum2, 2.0);
}
std2 = sqrt(std2 / cols) / sum2;
//printf("std1: %lg, std2: %lg\n",std1, std2);
double std_initial = sqrt((std1*std1)+(std2*std2));
double std = std_initial;
for (int t=0; t<max_iters; t++) {
if (std < tolerance) break; // finished
// x = sum1 ./ beta
for (int i=0; i<rows; i++) x[i] = ( isNotCloseToZero(beta[i]) ? sum1 / beta[i] : sum1 / epsilon(beta[i]) );
// gamma = C*x
computeGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw());
// gamma = 1 ./ beta
for (int i=0; i<cols; i++) y[i] = ( isNotCloseToZero(gamma[i]) ? sum2/gamma[i] : sum2 / epsilon(gamma[i]) );
// beta = B*y
computeBetaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw());
//ValueTypeB std_old = std;
std = 0.;
for (int i=0; i<rows; i++) {
std += pow(x[i]*beta[i] - sum1, 2.0);
}
std = sqrt(std / rows) / sum1;
// print it #, current error, convergence rate
//printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old);
}
//Save scaling vectors for later user, setup complete
left_scale = VVector(beta);
right_scale= VVector(gamma);
*/
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_h &A, ScaleDirection scaleOrUnscale)
{
FatalError("Host not supported", AMGX_ERR_NOT_IMPLEMENTED);
/*if (A.is_matrix_distributed()) {
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
if (left_scale.size() != A.get_num_rows()) {
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
// A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y))
// A_ij = f_i * A_ij * g_j
scaleMatrixHost(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw());*/
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
if (scaleOrUnscale == amgx::SCALE)
{
thrust::transform(v.begin(), v.end(), this->scale_vector.begin(), v.begin(), vmul_scale<ValueTypeB>() );
}
else
{
thrust::transform(v.begin(), v.end(), this->scale_vector.begin(), v.begin(), vmul_unscale<ValueTypeB>() );
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
FatalError("4x4 block size not supported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class BinormalizationScaler_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class BinormalizationScaler<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
7f3454cc767bbd8f1e2d66479f8f7f434d11e977.cu
|
/* Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <scalers/binormalization.h>
#include <sm_utils.inl>
#include <thrust/inner_product.h>
#include <solvers/block_common_solver.h>
#include <thrust_wrapper.h>
namespace amgx
{
template<class TConfig> class BinormalizationScaler;
/**********************************************************************
* HOST FUNCTIONS
*********************************************************************/
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *x, VectorType *y, VectorType *beta, VectorType *gamma)
{
for (int i = 0; i < rows; i++) { gamma[i] = 0.; }
for (int i = 0; i < rows; i++)
{
VectorType bi = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
bi += (val * val) * y[col];
gamma[col] += (val * val) * x[i];
}
beta[i] = bi;
}
}
// compute Gamma on its own
template <typename IndexType, typename MatrixType, typename VectorType>
void computeGammaHost(int rows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *x, VectorType *gamma)
{
for (int i = 0; i < rows; i++) { gamma[i] = 0.; }
for (int i = 0; i < rows; i++)
{
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
gamma[col] += (val * val) * x[i];
}
}
}
// compute Beta on its own
template <typename IndexType, typename MatrixType, typename VectorType>
void computeBetaHost(int nrows, IndexType *offsets, IndexType *indices, MatrixType *vals,
VectorType *y, VectorType *beta)
{
for (int i = 0; i < nrows; i++)
{
VectorType bi = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int col = indices[jj];
VectorType val = vals[jj];
bi += (val * val) * y[col];
}
beta[i] = bi;
}
}
template <typename IndexType, typename MatrixType, typename VectorType>
void scaleMatrixHost(int nrows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *x, VectorType *y)
{
for (int i = 0; i < nrows; i++)
{
VectorType fi = sqrt(fabs(x[i]));
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType gj = sqrt(fabs(y[j]));
values[jj] *= fi * gj;
}
}
}
/**********************************************************************
* DEVICE FUNCTIONS
*********************************************************************/
// compute initial beta, which is B*[1,...,1]'
template <typename IndexType, typename MatrixValue, typename VectorValue>
__global__
void computeBetaIniDevice(int nrows, IndexType *offsets, IndexType *indices, MatrixValue *values, VectorValue *beta)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
VectorValue rowsum = 0.0;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
rowsum += values[jj] * values[jj];
}
beta[i] = rowsum;
}
}
template <typename IndexType, typename MatrixType, typename VectorType>
__global__
void grabDiagonalVector(int nrows, IndexType *offsets, IndexType *indices, MatrixType *values, VectorType *diag)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
IndexType j = indices[jj];
if (i == j) { diag[i] = values[jj]; }
}
}
}
// functor to generate stddev of vectors
template <typename T>
struct std_f
{
std_f(T x) : v(x) {};
T v;
__host__ __device__
T operator()(const T &x1, const T &x2) const
{
return (x1 * x2 - v) * (x1 * x2 - v);
}
};
// scale the matrix using diag(F)*A*diag(G), with f = sqrt(fabs(x)), g = sqrt(fabs(y))
template <typename IndexType, typename MatrixType, typename VectorType>
__global__
void scaleMatrixDevice(int rows, IndexType *offsets, IndexType *indices, MatrixType *values,
VectorType *x)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
VectorType fi = fabs(x[i]);
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
VectorType fj = fabs(x[j]);
values[jj] *= sqrt(fabs(fi * fj));
}
}
}
template <typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void getColRowNorms(int rows, IndexType *offsets, IndexType *indices, ValueTypeA *values,
ValueTypeB *rownorms, ValueTypeB *colnorms)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < rows; i += gridDim.x * blockDim.x)
{
ValueTypeB rownorm = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
ValueTypeB curval = values[jj] * values[jj];
rownorm += curval;
utils::atomic_add(colnorms + j, curval);
}
rownorms[i] = rownorm;
}
}
// these warp reductions should be able to be replaced with amgx:: functions
template <int warpSize, typename T>
__device__ __inline__ T warpReduceSum(T val)
{
if (warpSize > 16) { val += utils::shfl_down(val, 16, warpSize); }
if (warpSize > 8) { val += utils::shfl_down(val, 8, warpSize); }
if (warpSize > 4) { val += utils::shfl_down(val, 4, warpSize); }
if (warpSize > 2) { val += utils::shfl_down(val, 2, warpSize); }
if (warpSize > 1) { val += utils::shfl_down(val, 1, warpSize); }
return val;
}
template <int warpSize, typename T>
__device__ T warpReduceSumShared(volatile T *vals, const int lane_id)
{
if (warpSize > 16) { vals[lane_id] += vals[lane_id + 16]; }
if (warpSize > 8) { vals[lane_id] += vals[lane_id + 8]; }
if (warpSize > 4) { vals[lane_id] += vals[lane_id + 4]; }
if (warpSize > 2) { vals[lane_id] += vals[lane_id + 2]; }
if (warpSize > 1) { vals[lane_id] += vals[lane_id + 1]; }
return vals[lane_id];
}
// compute gamma = B^T*x (B = A.^2)
template <typename IndexType, typename ValueTypeA, typename ValueTypeB>
__global__
void computeBetaDevice(const int nrows, IndexType *offsets, IndexType *indices, ValueTypeA *values,
ValueTypeB *diag, ValueTypeB *x, ValueTypeB *xn, ValueTypeB *beta, const ValueTypeB avg, ValueTypeB *avg_vec)
{
for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < nrows; i += gridDim.x * blockDim.x)
{
ValueTypeB xi = x[i];
ValueTypeB bi = beta[i];
ValueTypeB di = diag[i];
ValueTypeB c0 = -di * xi * xi + 2 * bi * xi - nrows * avg;
ValueTypeB c1 = (nrows - 2) * (bi - di * xi);
ValueTypeB c2 = (nrows - 1) * di;
assert(c0 > epsilon(c0)); //
// delta = xi - x(i)
ValueTypeB dx = (2 * c0) / (-c1 - sqrt(c1 * c1 - 4 * c2 * c0)) - x[i];
ValueTypeB davg = 0.;
ValueTypeB dbeta = 0.;
for (int jj = offsets[i]; jj < offsets[i + 1]; jj++)
{
int j = indices[jj];
ValueTypeA Bij = values[jj] * values[jj];
davg += Bij * x[j]; // += x' * B(:, i) == B(i, :)*x, because B is symmetric
dbeta += dx * Bij;
}
beta[i] = bi + dbeta;
avg_vec[i] = dx * (davg + bi + di * dx) / nrows;
//utils::atomic_add(avg, davg);
}
}
template<typename T>
struct square_value : public unary_function<T, T>
{
__host__ __device__ T operator()(const T &x) const
{
return x * x;
}
};
// vector constant scale operand
template <typename T>
struct vmul_scale_const
{
T _alpha;
vmul_scale_const(T alpha): _alpha(alpha) {};
__host__ __device__
T operator()(const T &vec) const
{
return vec * _alpha;
}
};
// vector scale operand
template <typename T>
struct vmul_scale
{
vmul_scale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec * sqrt(fabs(alpha)));
}
};
// vector unscale operand
template <typename T>
struct vmul_unscale
{
vmul_unscale() {};
__host__ __device__
T operator()(const T &vec, const T &alpha) const
{
return (vec / sqrt(fabs(alpha)));
}
};
// Setup on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_d &A)
{
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 10;
const ValueTypeB tolerance = 1e-10;
VVector diag(A.get_num_rows());
grabDiagonalVector <<< 4096, 128>>>(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), diag.raw());
int nrows = A.get_num_rows();
// temporary vectors
VVector x(nrows, 1), xn(nrows), davg(nrows), beta(nrows, 0);
computeBetaIniDevice <<< 4096, 256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), beta.raw());
cudaCheckError();
ValueTypeB avg = thrust_wrapper::reduce(beta.begin(), beta.end()) / nrows;
// calculate initial std1 and std2
thrust::device_ptr<ValueTypeB> x_ptr(x.raw()), beta_ptr(beta.raw());
ValueTypeB stdx = sqrt(thrust::inner_product(x_ptr, x_ptr + nrows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(avg)) / nrows) / avg;
for (int t = 0; t < max_iters; t++)
{
if (fabs(stdx) < tolerance) { break; } // finished
computeBetaDevice <<< 4096, 256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
diag.raw(), x.raw(), xn.raw(), beta.raw(), avg, davg.raw());
avg += thrust_wrapper::reduce(davg.begin(), davg.end());
// ValueTypeB stdx_old = stdx;
stdx = sqrt(thrust::inner_product(x_ptr, x_ptr + nrows, beta_ptr, ValueTypeB(0.), thrust::plus<ValueTypeB>(), std_f<ValueTypeB>(avg)) / nrows) / avg;
// print it #, current error, convergence rate
// printf("ITER: %d %.3e %.3e %.4lg\n",t, stdx, stdx_old, stdx / stdx_old);
}
//Save scaling vectors for later use, setup complete
scale_vector = VVector(x);
}
// Matrix Scaling on Device
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_d &A, ScaleDirection scaleOrUnscale)
{
if (scale_vector.size() != A.get_num_rows())
{
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
if (A.is_matrix_distributed())
{
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
int nrows = A.get_num_rows();
/*VVector rownorms(nrows, 0.0);
VVector colnorms(nrows, 0.0);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
ValueTypeB row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
ValueTypeB row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
ValueTypeB col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
ValueTypeB col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Original Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
scaleMatrixDevice <<< 4096, 256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), scale_vector.raw());
cudaCheckError();
ValueTypeB C_norm = sqrt(thrust::transform_reduce(A.values.begin(), A.values.begin() + A.get_num_nz() * A.get_block_size(), square_value<ValueTypeB>(), 0., thrust::plus<ValueTypeB>()) / nrows);
thrust::transform(A.values.begin(), A.values.begin() + A.get_num_nz()*A.get_block_size(), A.values.begin(), vmul_scale_const<ValueTypeB>(1. / C_norm) );
thrust::transform(scale_vector.begin(), scale_vector.end(), scale_vector.begin(), vmul_scale_const<ValueTypeB>(sqrt(1. / C_norm)) );
cudaCheckError();
/*thrust::fill(rownorms.begin(), rownorms.end(), 0.);
thrust::fill(colnorms.begin(), colnorms.end(), 0.);
getColRowNorms<<<4096,256>>>(nrows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), rownorms.raw(), colnorms.raw());
cudaCheckError();
row_max = *(thrust::max_element(rownorms.begin(), rownorms.end()));
row_min = *(thrust::min_element(rownorms.begin(), rownorms.end()));
col_max = *(thrust::max_element(colnorms.begin(), colnorms.end()));
col_min = *(thrust::min_element(colnorms.begin(), colnorms.end()));
cudaCheckError();
printf("Scaled Matrix: rowmax: %e, rowmin: %e, colmax: %e, colmin: %e\n", row_max, row_min, col_max, col_min);fflush(stdout);*/
exit(0);
}
// Setup on Host
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::setup(Matrix_h &A)
{
FatalError("Host not supported", AMGX_ERR_NOT_IMPLEMENTED);
/*if (A.is_matrix_distributed()) {
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
// move these out to config parameters?
const int max_iters = 10;
const ValueTypeB tolerance = 1e-10;
int rows = A.get_num_rows(), cols = A.get_num_cols();
// temporary vectors
VVector x(rows, 1), y(cols, 1), beta(rows, 0), gamma(cols, 0);
// perform matvecs to get beta and gamma (spmv for beta, spmvT for gamma)
computeBetaGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(),
x.raw(), y.raw(), beta.raw(), gamma.raw());
double std1 = 0., std2 = 0., sum1 = cols, sum2 = rows;
// calculate initial std1 and std2
for (int i=0; i<rows; i++) {
std1 += pow(x[i]*beta[i] - sum1, 2.0);
}
std1 = sqrt(std1 / rows) / sum1;
for (int i=0; i<cols; i++) {
std2 += pow(y[i]*gamma[i] - sum2, 2.0);
}
std2 = sqrt(std2 / cols) / sum2;
//printf("std1: %lg, std2: %lg\n",std1, std2);
double std_initial = sqrt((std1*std1)+(std2*std2));
double std = std_initial;
for (int t=0; t<max_iters; t++) {
if (std < tolerance) break; // finished
// x = sum1 ./ beta
for (int i=0; i<rows; i++) x[i] = ( isNotCloseToZero(beta[i]) ? sum1 / beta[i] : sum1 / epsilon(beta[i]) );
// gamma = C*x
computeGammaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), gamma.raw());
// gamma = 1 ./ beta
for (int i=0; i<cols; i++) y[i] = ( isNotCloseToZero(gamma[i]) ? sum2/gamma[i] : sum2 / epsilon(gamma[i]) );
// beta = B*y
computeBetaHost(rows, A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), y.raw(), beta.raw());
//ValueTypeB std_old = std;
std = 0.;
for (int i=0; i<rows; i++) {
std += pow(x[i]*beta[i] - sum1, 2.0);
}
std = sqrt(std / rows) / sum1;
// print it #, current error, convergence rate
//printf("ITER: %d %.3e %.4lg\n",t, std, std / std_old);
}
//Save scaling vectors for later user, setup complete
left_scale = VVector(beta);
right_scale= VVector(gamma);
*/
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleMatrix(Matrix_h &A, ScaleDirection scaleOrUnscale)
{
FatalError("Host not supported", AMGX_ERR_NOT_IMPLEMENTED);
/*if (A.is_matrix_distributed()) {
FatalError("Binormalization scaling not supported for distributed matrices", AMGX_ERR_NOT_IMPLEMENTED);
}
if (left_scale.size() != A.get_num_rows()) {
FatalError("Must call setup(A) before binormalization scaling can scale matrix", AMGX_ERR_NOT_IMPLEMENTED);
}
// A_scaled = F*A*G (f = diag(F) = sqrt(fabs(x)), g = diag(G) = sqrt(fabs(y))
// A_ij = f_i * A_ij * g_j
scaleMatrixHost(A.get_num_rows(), A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), left_scale.raw(), right_scale.raw());*/
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_device, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
if (scaleOrUnscale == amgx::SCALE)
{
thrust::transform(v.begin(), v.end(), this->scale_vector.begin(), v.begin(), vmul_scale<ValueTypeB>() );
}
else
{
thrust::transform(v.begin(), v.end(), this->scale_vector.begin(), v.begin(), vmul_unscale<ValueTypeB>() );
}
}
template <AMGX_VecPrecision t_vecPrec, AMGX_MatPrecision t_matPrec, AMGX_IndPrecision t_indPrec>
void BinormalizationScaler<TemplateConfig<AMGX_host, t_vecPrec, t_matPrec, t_indPrec> >::scaleVector(VVector &v, ScaleDirection scaleOrUnscale, ScaleSide leftOrRight)
{
FatalError("4x4 block size not supported", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
}
/****************************************
 * Explicit instantiations
***************************************/
#define AMGX_CASE_LINE(CASE) template class BinormalizationScaler_Base<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
#define AMGX_CASE_LINE(CASE) template class BinormalizationScaler<TemplateMode<CASE>::Type>;
AMGX_FORALL_BUILDS(AMGX_CASE_LINE)
#undef AMGX_CASE_LINE
} // namespace amgx
|
e6f5c21ea9f657f64972028f756d878981078a19.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/add.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename Type>
__global__ void naiveAddElemKernel(Type* out, const Type* in1, const Type* in2,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
template <typename Type>
void naiveAddElem(Type* out, const Type* in1, const Type* in2, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( naiveAddElemKernel<Type>), dim3(nblks),dim3(TPB), 0, 0, out, in1, in2, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct AddInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const AddInputs<T>& dims) {
return os;
}
template <typename T>
class AddTest: public ::testing::TestWithParam<AddInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0));
r.uniform(in2, len, T(-1.0), T(1.0));
naiveAddElem(out_ref, in1, in2, len);
add(out, in1, in2, len);
add(in1, in1, in2, len);
CUDA_CHECK(hipGetDeviceCount(&device_count));
if (device_count > 1) {
T *h_in1 = (T *) malloc(len * sizeof(T));
T *h_in2 = (T *) malloc(len * sizeof(T));
updateHost(h_in1, in1, len);
updateHost(h_in2, in2, len);
addMGColSplitTest(h_in1, h_in2);
free(h_in1);
free(h_in2);
}
}
void addMGColSplitTest(T *h_in1, T *h_in2) {
int n_gpus = 2;
TypeMG<T> d_in1[n_gpus];
TypeMG<T> d_in2[n_gpus];
TypeMG<T> d_out[n_gpus];
for (int i = 0; i < n_gpus; i++) {
d_in1[i].gpu_id = i;
d_in2[i].gpu_id = i;
d_out[i].gpu_id = i;
CUDA_CHECK(hipSetDevice(d_in1[i].gpu_id));
CUDA_CHECK(hipStreamCreate(&(d_in1[i].stream)));
d_in2[i].stream = d_in1[i].stream;
d_out[i].stream = d_in1[i].stream;
}
int len = params.len;
allocateMG(d_in1, n_gpus, 1, len, true, true, false);
allocateMG(d_in2, n_gpus, 1, len, true, true, false);
allocateMG(d_out, n_gpus, 1, len, true, true, false);
updateDeviceMG(d_in1, h_in1, n_gpus, false);
updateDeviceMG(d_in2, h_in2, n_gpus, false);
addMG(d_out, d_in1, d_in2, len, n_gpus, false);
T *h_out = (T *) malloc(len * sizeof(T));
updateHostMG(h_out, d_out, n_gpus, false);
streamSyncMG(d_in1, n_gpus);
streamDestroyGPUs(d_in1, n_gpus);
freeMG(d_in1, n_gpus);
freeMG(d_in2, n_gpus);
freeMG(d_out, n_gpus);
allocate(out_2, len);
updateDevice(out_2, h_out, len);
free(h_out);
}
void TearDown() override {
CUDA_CHECK(hipFree(in1));
CUDA_CHECK(hipFree(in2));
CUDA_CHECK(hipFree(out_ref));
CUDA_CHECK(hipFree(out));
if (device_count > 1) {
CUDA_CHECK(hipFree(out_2));
}
}
protected:
AddInputs<T> params;
T *in1, *in2, *out_ref, *out, *out_2;
int device_count = 0;
};
const std::vector<AddInputs<float> > inputsf2 = {
{0.000001f, 1024*1024, 1234ULL}
};
const std::vector<AddInputs<double> > inputsd2 = {
{0.00000001, 1024*1024, 1234ULL}
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ref, in1, params.len,
CompareApprox<float>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(out_ref, out_2, params.len,
CompareApprox<float>(params.tolerance)));
}
}
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ref, in1, params.len,
CompareApprox<double>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(out_ref, out_2, params.len,
CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
e6f5c21ea9f657f64972028f756d878981078a19.cu
|
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include "linalg/add.h"
#include "random/rng.h"
#include "test_utils.h"
namespace MLCommon {
namespace LinAlg {
template <typename Type>
__global__ void naiveAddElemKernel(Type* out, const Type* in1, const Type* in2,
int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < len) {
out[idx] = in1[idx] + in2[idx];
}
}
template <typename Type>
void naiveAddElem(Type* out, const Type* in1, const Type* in2, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
naiveAddElemKernel<Type><<<nblks,TPB>>>(out, in1, in2, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct AddInputs {
T tolerance;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream& operator<<(::std::ostream& os, const AddInputs<T>& dims) {
return os;
}
template <typename T>
class AddTest: public ::testing::TestWithParam<AddInputs<T> > {
protected:
void SetUp() override {
params = ::testing::TestWithParam<AddInputs<T>>::GetParam();
Random::Rng<T> r(params.seed);
int len = params.len;
allocate(in1, len);
allocate(in2, len);
allocate(out_ref, len);
allocate(out, len);
r.uniform(in1, len, T(-1.0), T(1.0));
r.uniform(in2, len, T(-1.0), T(1.0));
naiveAddElem(out_ref, in1, in2, len);
add(out, in1, in2, len);
add(in1, in1, in2, len);
CUDA_CHECK(cudaGetDeviceCount(&device_count));
if (device_count > 1) {
T *h_in1 = (T *) malloc(len * sizeof(T));
T *h_in2 = (T *) malloc(len * sizeof(T));
updateHost(h_in1, in1, len);
updateHost(h_in2, in2, len);
addMGColSplitTest(h_in1, h_in2);
free(h_in1);
free(h_in2);
}
}
void addMGColSplitTest(T *h_in1, T *h_in2) {
int n_gpus = 2;
TypeMG<T> d_in1[n_gpus];
TypeMG<T> d_in2[n_gpus];
TypeMG<T> d_out[n_gpus];
for (int i = 0; i < n_gpus; i++) {
d_in1[i].gpu_id = i;
d_in2[i].gpu_id = i;
d_out[i].gpu_id = i;
CUDA_CHECK(cudaSetDevice(d_in1[i].gpu_id));
CUDA_CHECK(cudaStreamCreate(&(d_in1[i].stream)));
d_in2[i].stream = d_in1[i].stream;
d_out[i].stream = d_in1[i].stream;
}
int len = params.len;
allocateMG(d_in1, n_gpus, 1, len, true, true, false);
allocateMG(d_in2, n_gpus, 1, len, true, true, false);
allocateMG(d_out, n_gpus, 1, len, true, true, false);
updateDeviceMG(d_in1, h_in1, n_gpus, false);
updateDeviceMG(d_in2, h_in2, n_gpus, false);
addMG(d_out, d_in1, d_in2, len, n_gpus, false);
T *h_out = (T *) malloc(len * sizeof(T));
updateHostMG(h_out, d_out, n_gpus, false);
streamSyncMG(d_in1, n_gpus);
streamDestroyGPUs(d_in1, n_gpus);
freeMG(d_in1, n_gpus);
freeMG(d_in2, n_gpus);
freeMG(d_out, n_gpus);
allocate(out_2, len);
updateDevice(out_2, h_out, len);
free(h_out);
}
void TearDown() override {
CUDA_CHECK(cudaFree(in1));
CUDA_CHECK(cudaFree(in2));
CUDA_CHECK(cudaFree(out_ref));
CUDA_CHECK(cudaFree(out));
if (device_count > 1) {
CUDA_CHECK(cudaFree(out_2));
}
}
protected:
AddInputs<T> params;
T *in1, *in2, *out_ref, *out, *out_2;
int device_count = 0;
};
const std::vector<AddInputs<float> > inputsf2 = {
{0.000001f, 1024*1024, 1234ULL}
};
const std::vector<AddInputs<double> > inputsd2 = {
{0.00000001, 1024*1024, 1234ULL}
};
typedef AddTest<float> AddTestF;
TEST_P(AddTestF, Result) {
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ref, in1, params.len,
CompareApprox<float>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(out_ref, out_2, params.len,
CompareApprox<float>(params.tolerance)));
}
}
typedef AddTest<double> AddTestD;
TEST_P(AddTestD, Result){
ASSERT_TRUE(devArrMatch(out_ref, out, params.len,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_ref, in1, params.len,
CompareApprox<double>(params.tolerance)));
if (device_count > 1) {
ASSERT_TRUE(devArrMatch(out_ref, out_2, params.len,
CompareApprox<double>(params.tolerance)));
}
}
INSTANTIATE_TEST_CASE_P(AddTests, AddTestF,
::testing::ValuesIn(inputsf2));
INSTANTIATE_TEST_CASE_P(AddTests, AddTestD,
::testing::ValuesIn(inputsd2));
} // end namespace LinAlg
} // end namespace MLCommon
|
c9a19d7ffda8aad68e31e2acb06c3d5b8e06d949.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <sys/stat.h>
#include "../common/GL/glew.h"
//#include <GL/glew.h>
//#include <GL/glaux.h>
#include "hip/hip_runtime.h"
#include "cutil.h"
#include "cuda_gl_interop.h"
#include "..\GLKLib\GLK.h"
#include "PMBody.h"
#include "LDNIcpuSolid.h"
#include "LDNIcudaSolid.h"
#include "LDNIcudaOperation.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
//--------------------------------------------------------------------------------------------
extern __global__ void LDNIDistanceField_CountBitInInteger(unsigned int *d_index, int nodeNum, int res);
extern __global__ void LDNIDistanceField_CountBitInArray(unsigned int *d_index, unsigned int *m_3dArray, int nodeNum, int res);
extern __global__ void LDNIDistanceField__writeTexToVBO(float3 *d_output, int res, int* table_index, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__writeTexToArray(unsigned short *d_output, int res, unsigned int *table_index, unsigned int* temp_index, int nodeNum);
extern __global__ void LDNIDistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* table_index, unsigned int *m_3dArray, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__Sort2DArray(unsigned short *d_output, unsigned int *d_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *site_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__FilterProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *site_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__FilterProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInX(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInY(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInXLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInYLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID);
extern __global__ void LDNIDistanceField__CountProbablySiteInY(unsigned int *bitDeleted, unsigned int *counter, int res, int nodeNum);
extern __global__ void LDNIDistanceField__SortProbablySite(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__SortProbablySite2(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__GetSiteByDist(ushort3 *sites, unsigned int *counter, unsigned int *sites_index, unsigned int *sites_off, int offdist, int res, int nodeNum);
extern __global__ void LDNIDistanceField__writeSitesToVBO(float3 *d_output, int res, unsigned int *counter, unsigned int* d_input, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__Test(float3 *d_output, int res, unsigned int *counter, ushort2 *site, unsigned int* site_index, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__GetProbablySiteInY(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned short *sites_x, unsigned int *sites_index_x, int3 res, int nodeNum);
extern __global__ void LDNIDistanceField__GetProbablySiteInX(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned int *sites_in, unsigned int *sites_index_in, int3 res, int nodeNum);
extern __global__ void LDNIDistanceField__MaurerAxisInY(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__MaurerAxisInX(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_2(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_4(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_8(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_16(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_32(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_2(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_4(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_8(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_16(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_32(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__countArrayToVBO(int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, int nodeNum);
extern __global__ void LDNIDistanceField__writeResultToVBO(float3 *d_output, int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, float width, float3 origin, int nodeNum);
//-----------------------------PBA Distance Field---------------------------------------------------------------
extern __global__ void PBADistanceField__writeTexToArray(int *d_output, int res, int nodeNum, unsigned int* counter);
extern __global__ void PBADistanceField_kernelFloodZ(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelPropagateInterband(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelUpdateVertical(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelMaurerAxis(int *stack, int size, int mod, int bandSize, int test);
extern __global__ void PBADistanceField_kernelMergeBands(int *stack, int *forward, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelCreateForwardPointers(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelColorAxis(int *output, int size);
extern __global__ void PBADistanceField_kernelTransposeXY(int *data, int log2Width, int mask);
extern __global__ void PBADistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* counter, int *outputDF, int offdist, float width, float3 origin, int nodeNum);
extern __global__ void PBADistanceField__countArrayToVBO(int res, unsigned int* counter, int *outputDF, int offdist, int nodeNum);
extern __global__ void PBADistanceField__writeCompactArray(int *d_output, int *d_input, unsigned int *counter, int nodeNum);
//--------------------------------------------------------------------------------------------
extern __device__ unsigned int bitCount(unsigned int i);
extern __device__ unsigned int GetFirstBitPos(unsigned int source);
extern __device__ unsigned int GetLastBitPos(unsigned int source);
extern __device__ unsigned int SetBitPos(unsigned int pos);
extern __device__ float interpointY(int x1, int y1, int z1, int x2, int y2, int z2, int x0, int z0);
extern __device__ bool GetBitPos(unsigned int pos, unsigned int source);
extern __device__ unsigned int Reversebit(unsigned int v);
extern __device__ int middlepointY(unsigned int site1, unsigned int site2, int z0);
extern __device__ int middlepointX(unsigned int site1, unsigned int site2, int y0, int z0);
//texture<unsigned int> site_tex;
texture<uint4,3> site_tex;
#define BANDWIDTH 32
#define MAX_INT 201326592
#define PBAMARKER -1
#define INFINITY 0x3ff
#define TOID(x, y, z, w) (__mul24(__mul24(z, w) + (y), w) + (x))
#define TOID_CPU(x, y, z, w) ((z) * (w) * (w) + (y) * (w) + (x))
#define ENCODE(x, y, z) (((x) << 20) | ((y) << 10) | (z))
#define DECODE(value, x, y, z) \
x = (value) >> 20; \
y = ((value) >> 10) & 0x3ff; \
z = (value) & 0x3ff
#define GET_X(value) ((value) >> 20)
#define GET_Y(value) (((value) >> 10) & 0x3ff)
#define GET_Z(value) (((value) == PBAMARKER) ? MAX_INT : ((value) & 0x3ff))
#define ROTATEXY(x) ((((x) & 0xffc00) << 10) | \
(((x) & 0x3ff00000) >> 10) | \
((x) & 0x3ff))
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
#define GET_STACK(value) ((value >> 16) & 0xffff)
#define GET_PTR(value) ((value) & 0xffff)
#define ENCODE_STACK(a, b) (((a) << 16) | (b & 0xffff))
#define ENCODE_STACK_3(a, b, c) (((a) << 20) | ((b) << 10) | (c & 0x3ff))
#define ENCODE_PTR(value, b) ((value & 0xffff0000) | (b & 0xffff))
#define ENCODE_Z(value, z) ((value & 0xfffffC00) | (z & 0x3ff))
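// The macros above pack one voxel coordinate (x, y, z) into a single 32-bit word using
// 10 bits per axis (hence the 0x3ff masks), which caps the supported resolution at 1024;
// GET_X/GET_Y/GET_Z and DECODE extract the fields again, ROTATEXY swaps the packed x and y
// fields in place, and the ENCODE_STACK/ENCODE_PTR variants reuse the same word as a pair
// of 16-bit stack/pointer indices for the band-merging kernels.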
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
//texture<short> pbaTexPointer;
texture<int> pbaTexPointer;
void LDNIcudaOperation::PBADistanceFieldGeneration(QuadTrglMesh *mesh, GLuint *vbo, unsigned int &vbosize, int res, int offdist, float boundingBox[])
{
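// Overall flow: (1) voxelize the mesh into a site array (PBADistanceField_SitesGeneration),
// (2) run the banded PBA sweeps - flood along Z, Maurer-axis construction plus band merging
// and coloring along Y, then an XY transpose so the same Y kernels can process the X axis,
// and (3) count the voxels within the requested offset distance and write them into a VBO.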
if (res > 512) return;
int fboSize = res;
int nVertices;
int phase1Band = 16;
int phase2Band = 16;
int phase3Band = 2;
int **pbaTextures;
int pbaMemSize;
int pbaCurrentBuffer;
int pbaTexSize;
pbaTextures = (int **) malloc(2 * sizeof(int *));
pbaTexSize = fboSize;
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
CUDA_SAFE_CALL(hipMalloc((void **) &pbaTextures[0], pbaMemSize));
CUDA_SAFE_CALL(hipMalloc((void **) &pbaTextures[1], pbaMemSize));
// PBA initialization
if (!PBADistanceField_SitesGeneration(mesh, vbo, nVertices, res, boundingBox, pbaTextures[0]))
return;
pbaCurrentBuffer = 0;
// Read sites to CPU
int *sites;
printf("Start %d \n", nVertices);
unsigned int* counter;
CUDA_SAFE_CALL(hipMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
CUDA_SAFE_CALL(hipMalloc((void**) &sites, nVertices*sizeof(int)));
CUDA_SAFE_CALL(hipMemset( sites, 0, nVertices*sizeof(int)) );
hipLaunchKernelGGL(( PBADistanceField__writeCompactArray), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, sites, pbaTextures[0], counter, res*res*res);
int* cpu_sites = (int*)malloc(nVertices*sizeof(int));
CUDA_SAFE_CALL(hipMemcpy( cpu_sites, sites, nVertices*sizeof(int),hipMemcpyDeviceToHost));
printf("End\n");
// Compute the 3D distance field
/************* Compute Z axis *************/
// --> (X, Y, Z)
pbaCurrentBuffer = PBADistanceField_pba3DColorZAxis(pbaTextures, res, phase1Band, pbaCurrentBuffer);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pbaCurrentBuffer = PBADistanceField_pba3DComputeProximatePointsYAxis(pbaTextures, res, phase2Band, pbaCurrentBuffer, 0);
pbaCurrentBuffer = PBADistanceField_pba3DColorYAxis(pbaTextures, res, phase3Band, pbaCurrentBuffer);
// --> (Y, X, Z)
PBADistanceField_pba3DTransposeXY(pbaTextures[pbaCurrentBuffer], res);
hipDeviceSynchronize();
printf("starting X ==================================\n");
/************** Compute X axis *************/
// Compute X axis
pbaCurrentBuffer = PBADistanceField_pba3DComputeProximatePointsYAxis(pbaTextures, res, phase2Band, pbaCurrentBuffer, 1);
pbaCurrentBuffer = PBADistanceField_pba3DColorYAxis(pbaTextures, res, phase3Band, pbaCurrentBuffer);
// --> (Y, X, Z)
PBADistanceField_pba3DTransposeXY(pbaTextures[pbaCurrentBuffer], res);
hipFree(sites);
hipFree(pbaTextures[1-pbaCurrentBuffer]);
char inputStr[10];
printf("\nCheck Error (very slow)? (y/n): ");
scanf("%9s", inputStr);
if (inputStr[0]=='y' || inputStr[0]=='Y')
{
PBADistanceField_CompareResult(pbaTextures[pbaCurrentBuffer], res, nVertices, cpu_sites);
}
free(cpu_sites);
// Generate Offset & display
cudaGraphicsResource *resource;
float gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
float width = gWidth*(float)res;
float origin[3];
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
//unsigned int* counter;
//CUDA_SAFE_CALL(hipMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( PBADistanceField__countArrayToVBO), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, res, counter, pbaTextures[pbaCurrentBuffer], offdist, res*res*res);
CUDA_SAFE_CALL(hipMemcpy( &vbosize, counter, sizeof(unsigned int),hipMemcpyDeviceToHost));
printf("size ---------- %u \n", vbosize);
if (vbosize == 0)
{
printf("Error in PBA Distance Computation !!! \n");
hipFree(pbaTextures[pbaCurrentBuffer]); // the other buffer was already released above
hipFree(counter);
free(pbaTextures);
return;
}
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbosize*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(hipGraphicsGLRegisterBuffer(&resource, *vbo, hipGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(hipGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( PBADistanceField__writeArrayToVBO), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, dptr, res, counter, pbaTextures[pbaCurrentBuffer], offdist, width, make_float3(origin[0],origin[1],origin[2]), res*res*res);
CUDA_SAFE_CALL(hipGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %u bytes\n", vbosize);
hipFree(pbaTextures[pbaCurrentBuffer]);
free(pbaTextures);
hipFree(counter);
}
int LDNIcudaOperation::PBADistanceField_pba3DColorZAxis(int **pbaTextures, int res, int m1, int cbuffer)
{
int pbaCurrentBuffer = cbuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((res / block.x) * m1, res / block.y);
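// Each band of size res/m1 is flooded along Z independently; when more than one band is
// used, the interband kernels below propagate the closest-site information across the
// band boundaries and then update the band interiors.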
hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
hipLaunchKernelGGL(( PBADistanceField_kernelFloodZ), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], res, res / block.x, res / m1);
pbaCurrentBuffer = 1 - pbaCurrentBuffer;
if (m1 > 1) {
// Passing information between bands
hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
hipLaunchKernelGGL(( PBADistanceField_kernelPropagateInterband), dim3(grid), dim3(block) , 0, 0, pbaTextures[1 - pbaCurrentBuffer], res, res / block.x, res / m1);
hipBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]);
hipLaunchKernelGGL(( PBADistanceField_kernelUpdateVertical), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], res, res / block.x, res / m1);
}
return pbaCurrentBuffer;
}
int LDNIcudaOperation::PBADistanceField_pba3DComputeProximatePointsYAxis(int **pbaTextures, int res, int m2, int cbuffer, int test)
{
int pbaCurrentBuffer = cbuffer;
int iStack = 1 - pbaCurrentBuffer;
int iForward = pbaCurrentBuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((res / block.x) * m2, res / block.y);
//printf("forward %d %d \n",iStack, iForward);
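// One buffer (iStack) stores the per-column stacks of proximate sites while the other
// (iForward) stores forward pointers between stack entries; the loop below then halves
// the number of bands on every pass until a single band covers the whole axis.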
// Compute proximate points locally in each band
hipBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
hipLaunchKernelGGL(( PBADistanceField_kernelMaurerAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack], res, res / block.x, res / m2, test);
//hipDeviceSynchronize();
// Construct forward pointers
hipBindTexture(0, pbaTexLinks, pbaTextures[iStack]);
hipLaunchKernelGGL(( PBADistanceField_kernelCreateForwardPointers), dim3(grid), dim3(block) , 0, 0, pbaTextures[iForward], res, res / block.x, res / m2);
//
hipBindTexture(0, pbaTexPointer, pbaTextures[iForward]);
// Repeatedly merge two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2) {
grid = dim3((res / block.x) * (noBand / 2), res / block.y);
hipLaunchKernelGGL(( PBADistanceField_kernelMergeBands), dim3(grid), dim3(block) , 0, 0, pbaTextures[iStack],
pbaTextures[iForward], res, res / block.x, res / noBand);
//printf("test %d %d %d %d\n", iForward, iStack, m2);
//break;
}
hipUnbindTexture(pbaTexLinks);
hipUnbindTexture(pbaTexColor);
hipUnbindTexture(pbaTexPointer);
return pbaCurrentBuffer;
}
int LDNIcudaOperation::PBADistanceField_pba3DColorYAxis(int **pbaTextures, int res, int m3, int cbuffer)
{
int pbaCurrentBuffer = cbuffer;
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(res / block.x, res);
hipBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer]);
hipLaunchKernelGGL(( PBADistanceField_kernelColorAxis), dim3(grid), dim3(block) , 0, 0, pbaTextures[pbaCurrentBuffer], res);
hipUnbindTexture(pbaTexColor);
return pbaCurrentBuffer;
}
void LDNIcudaOperation::PBADistanceField_pba3DTransposeXY(int *&inputDF, int res)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((res / BLOCKXY) * res, res / BLOCKXY);
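// log2Width and the (res - 1) mask are what the transpose kernel uses to turn linear
// indices into (x, y) coordinates, so res is assumed to be a power of two here.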
int log2Width;
int tmp = res;
log2Width = 0;
while (tmp > 1) { tmp /= 2; log2Width++; }
hipLaunchKernelGGL(( PBADistanceField_kernelTransposeXY), dim3(grid), dim3(block) , 0, 0, inputDF, log2Width, res - 1);
}
void LDNIcudaOperation::DistanceFieldGeneration(QuadTrglMesh *mesh, GLuint *vbo, unsigned int &vbosize, int res, int offdist, float boundingBox[])
{
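// Overall flow: generate per-ray site lists from the mesh, run a banded Maurer-axis sweep
// along Y (marking removable sites in bitDeleted), compact the surviving sites with a
// thrust exclusive scan, repeat the sweep and compaction along X, and finally count and
// write the voxels within the requested offset distance into an OpenGL VBO.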
int arrsize = res*res;
unsigned int* sites_index;
unsigned short *sites;
int siteNum;
LDNIDistanceField_SitesGeneration(mesh, vbo, siteNum, res, boundingBox, sites_index, sites);
if (siteNum <= 0)
{
hipFree(sites);
hipFree(sites_index);
return ;
}
//check whether the sites on each ray are sorted (just in case; they should already be sorted by the writing kernel)
//LDNIDistanceField__Sort2DArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites, sites_index, res, res*res);
//LDNIDistanceField__GenerateProbablySiteInYByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist, res*res*res);
//LDNIDistanceField__GenerateProbablySiteInXByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist, res*res*res);
//LDNIDistanceField__FilterProbablySiteInYByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist-1, res*res*res);
//LDNIDistanceField__FilterProbablySiteInXByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist-1, res*res*res);
long time = clock();
unsigned int* bitDeleted;
int bitsize = res*res*(res/32);
CUDA_SAFE_CALL(hipMalloc((void**) & bitDeleted, bitsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( bitDeleted, 0, bitsize*sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField__MaurerAxisInY), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/BANDWIDTH, res), offdist, res*(res/BANDWIDTH)*res);
hipDeviceSynchronize();
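// Band-merging cascade: the sweep above treats each 32-voxel band (BANDWIDTH) independently,
// and every launch below merges neighbouring bands one more level; one extra stage is
// enabled for each doubling of the resolution beyond 32.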
if (res > 32)
{
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsY_2), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/2, res), offdist, 2, 16, res*(res/2)*res);
//printf("Y-32\n");
}
if (res > 64)
{
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsY_4), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/4, res), offdist, 4, 8, res*(res/4)*res);
//printf("Y-64\n");
}
if (res > 128){
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsY_8), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/8, res), offdist, 8, 4, res*(res/8)*res);
//printf("Y-128\n");
}
if (res > 256)
{
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsY_16), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/16, res), offdist, 16, 2, res*(res/16)*res);
//printf("Y-256\n");
}
if (res > 512)
{
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsY_32), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites, sites_index, make_int3(res, res/32, res), offdist, 32, 1, res*(res/32)*res);
//printf("Y-512\n");
}
hipDeviceSynchronize();
printf("time 1 : %ld(ms) \n", clock()-time); time = clock();
unsigned int* sites_index_y;
unsigned int* numofBit = (unsigned int*)malloc(sizeof(unsigned int));
CUDA_SAFE_CALL(hipMalloc((void**) &sites_index_y, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_index_y, 0, (arrsize+1)*sizeof(unsigned int)));
hipLaunchKernelGGL(( LDNIDistanceField__CountProbablySiteInY), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_index_y, res, res*res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index_y); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
numofBit[0]=dev_ptr[arrsize];
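// After the exclusive scan, sites_index_y holds the per-ray start offsets and its last
// element is the total number of candidate sites kept along Y.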
hipDeviceSynchronize();
printf("time 2 : %ld(ms) \n", clock()-time); time = clock();
printf("Get Sites in Y : %d \n", numofBit[0]);
unsigned int* sites_y;
unsigned int* temp2D;
CUDA_SAFE_CALL(hipMalloc((void**) &sites_y, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_y, 0, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMalloc((void**) &temp2D, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( temp2D, 0, (arrsize+1)*sizeof(unsigned int)));
hipLaunchKernelGGL(( LDNIDistanceField__GetProbablySiteInY), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, temp2D, sites_y, sites_index_y, sites, sites_index, make_int3(res, res/BANDWIDTH, res), res*(res/BANDWIDTH)*res);
hipFree(temp2D);
hipFree(sites_index);
hipFree(sites);
CUDA_SAFE_CALL(hipMemset( bitDeleted, 0, bitsize*sizeof(unsigned int)) );
//LDNIDistanceField__SortProbablySite<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_y, sites_index_y, res, res*res);
printf("time 3 : %ld(ms) \n", clock()-time); time = clock();
/* //for debugging
thrust::device_ptr<unsigned int> dev_ptr2(temp2D); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr2, dev_ptr2+(arrsize+1), dev_ptr2); // in-place scan
numofBit[0]=dev_ptr2[arrsize];
printf("Proved Sites in Y : %d \n", numofBit[0]);*/
//-------------------------------X direction---------------------------------------//
hipLaunchKernelGGL(( LDNIDistanceField__MaurerAxisInX), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/BANDWIDTH, res), offdist, res*(res/BANDWIDTH)*res);
hipDeviceSynchronize();
printf("time 4 : %ld(ms) \n", clock()-time); time = clock();
if (res > 32)
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsX_2), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/2, res), offdist, 2, 16, res*(res/2)*res);
if (res > 64)
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsX_4), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/4, res), offdist, 4, 8, res*(res/4)*res);
if (res > 128)
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsX_8), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/8, res), offdist, 8, 4, res*(res/8)*res);
if (res > 256)
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsX_16), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/16, res), offdist, 16, 2, res*(res/16)*res);
if (res > 512)
hipLaunchKernelGGL(( LDNIDistanceField__kernelMergeBandsX_32), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_y, sites_index_y, make_int3(res, res/32, res), offdist, 32, 1, res*(res/32)*res);
hipDeviceSynchronize();
printf("time 5 : %ld(ms) \n", clock()-time); time = clock();
unsigned int* sites_index_x;
CUDA_SAFE_CALL(hipMalloc((void**) &sites_index_x, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_index_x, 0, (arrsize+1)*sizeof(unsigned int)));
hipLaunchKernelGGL(( LDNIDistanceField__CountProbablySiteInY), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, sites_index_x, res, res*res);
thrust::device_ptr<unsigned int> dev_ptr2(sites_index_x); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr2, dev_ptr2+(arrsize+1), dev_ptr2); // in-place scan
numofBit[0]=dev_ptr2[arrsize];
hipDeviceSynchronize();
printf("time 6 : %ld(ms) \n", clock()-time); time = clock();
printf("Get Sites in X : %d \n", numofBit[0]);
unsigned int* sites_x;
CUDA_SAFE_CALL(hipMalloc((void**) &sites_x, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_x, 0, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMalloc((void**) &temp2D, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( temp2D, 0, (arrsize+1)*sizeof(unsigned int)));
hipLaunchKernelGGL(( LDNIDistanceField__GetProbablySiteInX), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, bitDeleted, temp2D, sites_x, sites_index_x, sites_y, sites_index_y, make_int3(res, res/BANDWIDTH, res), res*(res/BANDWIDTH)*res);
//for debugging
/*thrust::device_ptr<unsigned int> dev_ptr3(temp2D); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr3, dev_ptr3+(arrsize+1), dev_ptr3); // in-place scan
numofBit[0]=dev_ptr3[arrsize];
printf("Proved Sites in Y : %d \n", numofBit[0]);*/
hipDeviceSynchronize();
printf("time 7 : %ld(ms) \n", clock()-time); time = clock();
hipFree(temp2D);
hipFree(sites_index_y);
hipFree(sites_y);
hipFree(bitDeleted);
//LDNIDistanceField__SortProbablySite2<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_x, sites_index_x, res, res*res);
//-------------------------------Get Sites for Rendering---------------------------------------//
//Display
cudaGraphicsResource *resource;
float gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
float width = gWidth*(float)res;
float origin[3];
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
unsigned int* counter;
CUDA_SAFE_CALL(hipMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)));
hipLaunchKernelGGL(( LDNIDistanceField__countArrayToVBO), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, make_int3(res, res, res), counter, sites_x, sites_index_x, offdist, res*res*res);
CUDA_SAFE_CALL(hipMemcpy( numofBit, counter, sizeof(unsigned int),hipMemcpyDeviceToHost));
//vbosize = LDNIDistanceField_ReadArrayToVBO(rr, vbo, bitDeleted, res, width, origin);
//-----------------------------------------------------------------------------------//
printf("Final Site %d \n", numofBit[0]);
if (numofBit[0] == 0)
{
hipFree(sites_index_x); // bitDeleted was already released above
hipFree(sites_x);
hipFree(counter);
free(numofBit);
return;
}
vbosize = numofBit[0];
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, numofBit[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(hipGraphicsGLRegisterBuffer(&resource, *vbo, hipGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(hipGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField__writeResultToVBO), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, dptr, make_int3(res, res, res), counter, sites_x, sites_index_x, offdist, width, make_float3(origin[0],origin[1],origin[2]), res*res*res);
CUDA_SAFE_CALL(hipMemcpy( &vbosize, counter, sizeof(unsigned int),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %u %u bytes\n", vbosize, numofBit[0]);
//-----------------------------------------------------------------------------------//
hipFree(counter);
hipFree(sites_index_x); // bitDeleted was already released above
hipFree(sites_x);
free(numofBit);
}
int LDNIcudaOperation::LDNIDistanceField_ReadArrayToVBO(cudaGraphicsResource *resource, GLuint *vbo, unsigned int *m_3dArray, int res, float width, float origin[3])
{
unsigned int* countVertex;
CUDA_SAFE_CALL(hipMalloc((void**) & countVertex,sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( countVertex, 0, sizeof(unsigned int)) );
// Declare Host Variable
int* vbo_size = (int*)malloc(sizeof(int));
//Step 1 : Find out the size of VBO
hipLaunchKernelGGL(( LDNIDistanceField_CountBitInArray), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, countVertex, m_3dArray, res*res*(res/32), res);
CUDA_SAFE_CALL(hipMemcpy( vbo_size, countVertex, sizeof(unsigned int),hipMemcpyDeviceToHost));
printf("Distance Offset: VBO Size %d bytes\n", vbo_size[0]);
if (vbo_size[0] <= 0) { hipFree(countVertex); free(vbo_size); return 0; }
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbo_size[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(hipGraphicsGLRegisterBuffer(&resource, *vbo, hipGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(hipGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(hipMemset( countVertex, 0, sizeof(int)) );
hipLaunchKernelGGL(( LDNIDistanceField__writeArrayToVBO), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, dptr, res, countVertex, m_3dArray, width, make_float3(origin[0],origin[1],origin[2]), res*res*(res/32));
CUDA_SAFE_CALL(hipMemcpy( vbo_size, countVertex, sizeof(int),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %d bytes\n", vbo_size[0]);
hipFree(countVertex);
int vertexNum = vbo_size[0];
free(vbo_size);
return vertexNum;
}
int LDNIcudaOperation::LDNIDistanceField_Read3DTextureToVBO(cudaGraphicsResource *resource, GLuint* vbo, int res, float width, float origin[3])
{
/*int* countVertex;
CUDA_SAFE_CALL(hipMalloc((void**) & countVertex,sizeof(int)));
CUDA_SAFE_CALL(hipMemset( countVertex, 0, sizeof(int)) );
// Declare Host Variable
int* vbo_size = (int*)malloc(sizeof(int));
//Step 1 : Find out the size of VBO
hipLaunchKernelGGL(( LDNIDistanceField_CountBitInInteger), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, countVertex, res*res*(res/128), res);
CUDA_SAFE_CALL(hipMemcpy( vbo_size, countVertex, sizeof(int),hipMemcpyDeviceToHost));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbo_size[0]);
if (vbo_size[0] <= 0) return 0;
//Step 2 : Create the VBO
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbo_size[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(hipGraphicsGLRegisterBuffer(&resource, *vbo, hipGraphicsRegisterFlagsWriteDiscard));
//Step 3 : Write VBO
CUDA_SAFE_CALL(hipGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(hipGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(hipMemset( countVertex, 0, sizeof(int)) );
LDNIDistanceField__writeTexToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dptr, res, countVertex, width, make_float3(origin[0],origin[1],origin[2]), res*res*(res/128));
CUDA_SAFE_CALL(hipMemcpy( vbo_size, countVertex, sizeof(int),hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbo_size[0]);
hipFree(countVertex);
return vbo_size[0];*/
return 0;
}
bool LDNIcudaOperation::PBADistanceField_SitesGeneration(QuadTrglMesh *mesh, GLuint *vbo, int &vbosize, int res, float boundingBox[], int *&inputDF)
{
const bool bCube=true;
float origin[3],gWidth, width; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
width = gWidth*(float)res;
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
short nAxis;
GLenum buffers[16] = {GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT, GL_COLOR_ATTACHMENT3_EXT
, GL_COLOR_ATTACHMENT4_EXT, GL_COLOR_ATTACHMENT5_EXT, GL_COLOR_ATTACHMENT6_EXT, GL_COLOR_ATTACHMENT7_EXT
, GL_COLOR_ATTACHMENT8_EXT, GL_COLOR_ATTACHMENT9_EXT, GL_COLOR_ATTACHMENT10_EXT, GL_COLOR_ATTACHMENT11_EXT
, GL_COLOR_ATTACHMENT12_EXT, GL_COLOR_ATTACHMENT13_EXT, GL_COLOR_ATTACHMENT14_EXT, GL_COLOR_ATTACHMENT15_EXT};
//-----------------------------------------------------------------------------------------
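// Step 1: compiling the LDNI sampling shaders (vertex/geometry/fragment) and attaching them to the program object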
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\newSampleLDNIGShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\voxelLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! 1 \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// texture setting for fragment shader
//memset(fileadd,0,256*sizeof(char));
//strcat(fileadd, "Outdata");
int maxColorBuffers, maxTextureSize;
int layer = res/128;
glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
glGetIntegerv( GL_MAX_3D_TEXTURE_SIZE_EXT, &maxTextureSize );
int z_tile = ceil(layer/(float)maxColorBuffers);
printf("max texture size %d %d\n", maxTextureSize, layer);
char value[10];
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
int tilesize = min(layer, maxColorBuffers)*128;
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
glGetError(); // clear any error generated earlier
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1,id2,id3,id4; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
id2 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id2,res);
id3 = glGetUniformLocationARB(g_programObj,"tilesize");
glUniform1iARB(id3,tilesize);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: Prepare 3D texture for voxelization
GLuint PrimitiveVoxel[3];
glEnable(GL_TEXTURE_3D_EXT);
glGenTextures(1, &PrimitiveVoxel[0]); // x-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[0]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
//if res <= 2048, the texture can be created directly; otherwise it needs to be subdivided
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[1]); // y-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[1]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[2]); // z-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[2]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
//-----------------------------------------------------------------------------------------
// Step 6: Voxelization
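// Each of the three axis-aligned views is rendered orthographically into its RGBA32UI 3D
// texture with GL_COLOR_LOGIC_OP/GL_OR enabled, so every rasterized fragment ORs one bit
// into the 128-bit texel column (4 x 32-bit channels, layer = res/128) of PrimitiveVoxel.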
GLuint fbo;
int buffersize = min(layer, maxColorBuffers);
int tile;
for(tile=0; tile < z_tile; tile++)
{
for(nAxis=0; nAxis < 3; nAxis++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++) glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis], 0, a);
id4 = glGetUniformLocationARB(g_programObj,"tile");
glUniform1iARB(id4,tile);
glDrawBuffers(buffersize,buffers);
glEnable(GL_DEPTH_TEST);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glViewport(0,0,res,res);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-width*0.5,width*0.5,-width*0.5,width*0.5,width*0.5,-width*0.5);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClearColorIuiEXT(0,0,0,0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(dispListIndex);
glFlush();
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glClearColorIuiEXT(0,0,0,0);
glDeleteFramebuffersEXT (1,&fbo);
}
}
glUseProgramObjectARB(0);
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDeleteTextures(1, &vertexTexture);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//-----------------------------------------------------------------------------------------
// Step 7: Build Composite Shader
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Composite Vertex Shader Compile Error\n\n %s ", str); return false;
}
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Composite Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 8: Composite the voxelization result
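// The X- and Y-axis voxelizations are resampled into the Z-axis texture by drawing layered
// quads with the FetchTextureX/FetchTextureY fragment subroutines and OR-ing the results;
// the composited texture is then registered with CUDA and bound to site_tex so the kernels
// below can extract the sites.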
cudaGraphicsResource *resource;
int t_index = glGetAttribLocation( g_programObj, "in_coord");
CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, hipGraphicsMapFlagsReadOnly) );
for(tile=0; tile < z_tile; tile++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[2]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++)
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[2], 0, a);
//CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, hipGraphicsMapFlagsReadOnly) );
glUseProgramObjectARB(g_programObj);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[0]);
glDisable(GL_TEXTURE_3D_EXT);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[1]);
glDisable(GL_TEXTURE_3D_EXT);
GLuint fetchXIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureX");
GLuint fetchYIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureY");
GLint tex0;
tex0 = glGetUniformLocationARB(g_programObj,"Xtex");
glUniform1iARB(tex0,0);
tex0 = glGetUniformLocationARB(g_programObj,"Ytex");
glUniform1iARB(tex0,1);
id0 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id0,res);
glDrawBuffers(min(maxColorBuffers,layer-(tile*maxColorBuffers)),buffers);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LOGIC_OP);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glViewport(0, 0, res, res);
glClearColorIuiEXT(0,0,0,0);
glClear( GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchXIndex);
float l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
//glCallList(dispListIndex);
//
/*float layer = -1.0-(1.0/(res/128));
glBegin(GL_QUADS);
for(int i=1;i<=(res/128);i++)
{
glTexCoord3i(0 , res , i-1); glVertex3f(-1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, res , i-1); glVertex3f( 1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, 0 , i-1); glVertex3f( 1.0f,-1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(0 , 0 , i-1); glVertex3f(-1.0f,-1.0f, layer + i*(2.0/(res/128)));
}
glEnd();
glFlush();*/
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchYIndex);
l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glDisable(GL_COLOR_LOGIC_OP);
glClearColorIuiEXT(0,0,0,0);
}
glBindTexture(GL_TEXTURE_3D_EXT,0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glDeleteFramebuffersEXT (1,&fbo);
glUseProgramObjectARB(0);
CUDA_SAFE_CALL( hipGraphicsMapResources( 1, &resource, NULL ) );
hipArray *in_array;
CUDA_SAFE_CALL( hipGraphicsSubResourceGetMappedArray( &in_array, resource, 0, 0));
CUDA_SAFE_CALL( hipBindTextureToArray(site_tex, in_array) );
CUDA_SAFE_CALL( hipGraphicsUnmapResources( 1, &resource, NULL ) );
//vbosize = LDNIDistanceField_Read3DTextureToVBO(resource, vbo, res, width, origin);
/*int arrsize = res*res;
CUDA_SAFE_CALL(hipMalloc((void**) &sites_index, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_index, 0, (arrsize+1)*sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField_CountBitInInteger), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, sites_index, res*res*(res/128), res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int siteNum=dev_ptr[arrsize];
printf("Number of Sites: ----- %d\n",siteNum);
vbosize = siteNum;
CUDA_SAFE_CALL(hipMalloc((void**) &sites, siteNum*sizeof(unsigned short)));
CUDA_SAFE_CALL(hipMemset( sites, 0, siteNum*sizeof(unsigned short)) );
unsigned int *temp2D;
CUDA_SAFE_CALL(hipMalloc((void**) &temp2D, arrsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( temp2D, 0, arrsize*sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField__writeTexToArray), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, sites, res, sites_index, temp2D, res*res*(res/128));
hipFree(temp2D);
hipFree(counter);*/
unsigned int *counter;
CUDA_SAFE_CALL(hipMalloc((void**) &counter,sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
hipLaunchKernelGGL(( PBADistanceField__writeTexToArray), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, inputDF, res, res*res*(res/128), counter);
CUDA_SAFE_CALL( hipMemcpy( &vbosize, counter, sizeof(unsigned int), hipMemcpyDeviceToHost ) );
hipGraphicsUnregisterResource(resource);
/**/
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
glDeleteTextures(3, PrimitiveVoxel);
glDisable(GL_TEXTURE_3D_EXT);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
return true;
}
bool LDNIcudaOperation::LDNIDistanceField_SitesGeneration(QuadTrglMesh *mesh, GLuint *vbo, int &vbosize, int res, float boundingBox[], unsigned int *&sites_index, unsigned short *&sites)
{
const bool bCube=true;
float origin[3],gWidth, width; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
width = gWidth*(float)res;
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
short nAxis;
GLenum buffers[16] = {GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT, GL_COLOR_ATTACHMENT3_EXT
, GL_COLOR_ATTACHMENT4_EXT, GL_COLOR_ATTACHMENT5_EXT, GL_COLOR_ATTACHMENT6_EXT, GL_COLOR_ATTACHMENT7_EXT
, GL_COLOR_ATTACHMENT8_EXT, GL_COLOR_ATTACHMENT9_EXT, GL_COLOR_ATTACHMENT10_EXT, GL_COLOR_ATTACHMENT11_EXT
, GL_COLOR_ATTACHMENT12_EXT, GL_COLOR_ATTACHMENT13_EXT, GL_COLOR_ATTACHMENT14_EXT, GL_COLOR_ATTACHMENT15_EXT};
//-----------------------------------------------------------------------------------------
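// Step 1: compiling the LDNI sampling shaders (vertex/geometry/fragment) and attaching them to the program object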
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\newSampleLDNIGShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete [] ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\voxelLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete [] ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! 1 \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// texture setting for fragment shader
//memset(fileadd,0,256*sizeof(char));
//strcat(fileadd, "Outdata");
int maxColorBuffers, maxTextureSize;
int layer = res/128;
glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
glGetIntegerv( GL_MAX_3D_TEXTURE_SIZE_EXT, &maxTextureSize );
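// Each RGBA32UI texel packs 128 voxels along the viewing axis, so res/128 slices ("layer") are
// rendered; when this exceeds the number of MRT color attachments the work is split into z_tile passes.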
int z_tile = (int)ceil(layer/(float)maxColorBuffers);
printf("max texture size %d %d\n", maxTextureSize, layer);
char value[10];
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
int tilesize = min(layer, maxColorBuffers)*128;
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
glGetError(); // clear any error generated earlier
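// Pack the mesh vertex array into a GL_RGB32F rectangle texture (width rounded up to a power of
// two) so the geometry shader can fetch vertex positions by index.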
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
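// One GL_POINTS primitive is emitted per triangle, its integer coordinates carrying the three
// vertex indices (quads are split into two triangles); the geometry shader is expected to expand
// these into real triangles using the vertex texture bound above.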
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1,id2,id3,id4; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
id2 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id2,res);
id3 = glGetUniformLocationARB(g_programObj,"tilesize");
glUniform1iARB(id3,tilesize);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: Prepare 3D texture for voxelization
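// Three RGBA32UI 3D textures hold the per-axis voxelization as bit masks: each texel stores 128
// solid/empty bits along its axis, giving res x res x (res/128) texels per volume.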
GLuint PrimitiveVoxel[3];
glEnable(GL_TEXTURE_3D_EXT);
glGenTextures(1, &PrimitiveVoxel[0]); // x-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[0]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
//If res <= 2048, the texture can be created directly; otherwise it needs to be subdivided.
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[1]); // y-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[1]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[2]); // z-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[2]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
//-----------------------------------------------------------------------------------------
// Step 6: Voxelization
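// For each axis the model is rotated so that axis becomes the viewing direction and rendered with
// the depth test forced to GL_ALWAYS; GL_COLOR_LOGIC_OP with GL_OR accumulates the fragment bit
// masks into the attached 3D texture layers.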
GLuint fbo;
int buffersize = min(layer, maxColorBuffers);
int tile;
for(tile=0; tile < z_tile; tile++)
{
for(nAxis=0; nAxis < 3; nAxis++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis]);
for(int a=tile*maxColorBuffers; a < min((tile+1)*maxColorBuffers,layer); a++) // attach this tile's 128-bit z-layers to color attachments 0..N-1 (assumed mapping)
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a-tile*maxColorBuffers] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis], 0, a);
printf("tile - %d %d %d \n", z_tile, tile, buffersize);
id4 = glGetUniformLocationARB(g_programObj,"tile");
glUniform1iARB(id4,tile);
glDrawBuffers(buffersize,buffers);
glEnable(GL_DEPTH_TEST);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glViewport(0,0,res,res);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-width*0.5,width*0.5,-width*0.5,width*0.5,width*0.5,-width*0.5);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClearColorIuiEXT(0,0,0,0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(dispListIndex);
glFlush();
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glClearColorIuiEXT(0,0,0,0);
glDeleteFramebuffersEXT (1,&fbo);
}
}
glUseProgramObjectARB(0);
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDeleteTextures(1, &vertexTexture);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//-----------------------------------------------------------------------------------------
// Step 7: Build Composite Shader
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete [] ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Composite Vertex Shader Compile Error\n\n %s ", str); return false;
}
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete [] ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Composite Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 8: Composite the voxelization result
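// A full-screen quad is drawn per 128-bit slice; the fragment shader subroutines FetchTextureX and
// FetchTextureY re-sample the x/y volumes and the results are OR-ed into PrimitiveVoxel[2], which
// is afterwards mapped into CUDA through hipGraphicsGLRegisterImage.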
cudaGraphicsResource *resource;
int t_index = glGetAttribLocation( g_programObj, "in_coord");
CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, hipGraphicsMapFlagsReadOnly) );
for(tile=0; tile < z_tile; tile++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[2]);
for(int a=tile*maxColorBuffers; a < min((tile+1)*maxColorBuffers,layer); a++) // attach this tile's 128-bit z-layers (assumed mapping, mirrors the voxelization pass above)
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a-tile*maxColorBuffers] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[2], 0, a);
//CUDA_SAFE_CALL( hipGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, hipGraphicsMapFlagsReadOnly) );
glUseProgramObjectARB(g_programObj);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[0]);
glDisable(GL_TEXTURE_3D_EXT);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[1]);
glDisable(GL_TEXTURE_3D_EXT);
GLuint fetchXIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureX");
GLuint fetchYIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureY");
GLint tex0;
tex0 = glGetUniformLocationARB(g_programObj,"Xtex");
glUniform1iARB(tex0,0);
tex0 = glGetUniformLocationARB(g_programObj,"Ytex");
glUniform1iARB(tex0,1);
id0 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id0,res);
glDrawBuffers(min(maxColorBuffers,layer-(tile*maxColorBuffers)),buffers);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LOGIC_OP);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glViewport(0, 0, res, res);
glClearColorIuiEXT(0,0,0,0);
glClear( GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchXIndex);
float l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
//glCallList(dispListIndex);
//
/*float layer = -1.0-(1.0/(res/128));
glBegin(GL_QUADS);
for(int i=1;i<=(res/128);i++)
{
glTexCoord3i(0 , res , i-1); glVertex3f(-1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, res , i-1); glVertex3f( 1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, 0 , i-1); glVertex3f( 1.0f,-1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(0 , 0 , i-1); glVertex3f(-1.0f,-1.0f, layer + i*(2.0/(res/128)));
}
glEnd();
glFlush();*/
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchYIndex);
l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glDisable(GL_COLOR_LOGIC_OP);
glClearColorIuiEXT(0,0,0,0);
}
glBindTexture(GL_TEXTURE_3D_EXT,0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glDeleteFramebuffersEXT (1,&fbo);
glUseProgramObjectARB(0);
CUDA_SAFE_CALL( hipGraphicsMapResources( 1, &resource, NULL ) );
hipArray *in_array;
CUDA_SAFE_CALL( hipGraphicsSubResourceGetMappedArray( &in_array, resource, 0, 0));
CUDA_SAFE_CALL( hipBindTextureToArray(site_tex, in_array) );
CUDA_SAFE_CALL( hipGraphicsUnmapResources( 1, &resource, NULL ) );
printf("Memory Spent %.2f(MB)\n",(res*res*res/8)*1e-6);/**/
//vbosize = LDNIDistanceField_Read3DTextureToVBO(resource, vbo, res, width, origin);
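// Build a CSR-like site layout on the GPU: count the set bits of every (x,y) ray, exclusive-scan
// the counts into sites_index, then write the z position of every set bit into sites.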
int arrsize = res*res;
CUDA_SAFE_CALL(hipMalloc((void**) &sites_index, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( sites_index, 0, (arrsize+1)*sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField_CountBitInInteger), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, sites_index, res*res*(res/128), res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int siteNum=dev_ptr[arrsize];
printf("Number of Sites: ----- %d\n",siteNum);
vbosize = siteNum;
CUDA_SAFE_CALL(hipMalloc((void**) &sites, siteNum*sizeof(unsigned short)));
CUDA_SAFE_CALL(hipMemset( sites, 0, siteNum*sizeof(unsigned short)) );
unsigned int *counter;
CUDA_SAFE_CALL(hipMalloc((void**) &counter,sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( counter, 0, sizeof(unsigned int)) );
unsigned int *temp2D;
CUDA_SAFE_CALL(hipMalloc((void**) &temp2D, arrsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(hipMemset( temp2D, 0, arrsize*sizeof(unsigned int)) );
hipLaunchKernelGGL(( LDNIDistanceField__writeTexToArray), dim3(BLOCKS_PER_GRID),dim3(THREADS_PER_BLOCK), 0, 0, sites, res, sites_index, temp2D, res*res*(res/128));
hipFree(temp2D);
hipFree(counter);
hipGraphicsUnregisterResource(resource);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
glDeleteTextures(3, PrimitiveVoxel);
glDisable(GL_TEXTURE_3D_EXT);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
return true;
}
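//--------------------------------------------------------------------------------------------
// PBADistanceField_CompareResult
// Brute-force verification: for every voxel the site encoded in the computed distance field is
// compared against an exhaustive search over all sites, accumulating error statistics.
//--------------------------------------------------------------------------------------------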
void LDNIcudaOperation::PBADistanceField_CompareResult(int *inputDF, int res, int numOfSite, int *sites)
{
float totalDistError = 0.0;
float maxDistError = 0.0;
int errorCount = 0;
int dx, dy, dz, nx, ny, nz;
double dist, myDist, correctDist, error;
int* output = (int*)malloc(res*res*res*sizeof(int));
CUDA_SAFE_CALL(hipMemcpy(output, inputDF, res*res*res*sizeof(int), hipMemcpyDeviceToHost));
for (int i = 0; i < res; i++)
for (int j = 0; j < res; j++)
for (int k = 0; k < res; k++) {
int id = TOID_CPU(i, j, k, res);
DECODE(output[id], nx, ny, nz);
dx = nx - i; dy = ny - j; dz = nz - k;
correctDist = myDist = dx * dx + dy * dy + dz * dz;
//if (output[k*res*res+j*res+i] == 0)
//if (i == 0 && j == 245 && k == 231)
//{
// printf("error~~~~~~~~~ %d %d %d \n", i, j, k);
// printf(" Error!!!!!!!!!!!! %d %d %d %d %d %f \n", res, output[id], nx,ny,nz, myDist);
//}
for (int t = 0; t < numOfSite; t++) {
DECODE(sites[t], nx, ny, nz);
dx = nx - i; dy = ny - j; dz = nz - k;
dist = dx * dx + dy * dy + dz * dz;
if (dist < correctDist)
{
/*if (i == 0 && j == 245 && k == 231)
{
printf("%d %d %f %f %d %d %d \n", t, sites[t], correctDist, dist, nx,ny,nz);
}*/
correctDist = dist;
}
}
if (correctDist != myDist) {
error = fabs(sqrt(myDist) - sqrt(correctDist));
if (i == 0 && j == 245 && k == 231)
{
//printf(" Error!!!!!!!!!!!! %d %d %d \n", i, j, k);
printf(" Error!!!!!!!!!!!! %f %f %f %d %d %d \n", myDist, dist, correctDist, i,j,k);
}
errorCount++;
totalDistError += error;
if (error > maxDistError)
maxDistError = error;
}
}
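// Report the accumulated comparison statistics.
printf("PBADistanceField_CompareResult: %d mismatched voxels, total error %f, max error %f\n", errorCount, totalDistError, maxDistError);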
free(output);
}
//--------------------------------------------------------------------------------------------
// Kernel functions
//--------------------------------------------------------------------------------------------
__global__ void LDNIDistanceField_CountBitInArray(unsigned int *d_index, unsigned int *m_3dArray, int nodeNum, int res)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int temp;
int ix,iy,iz;
unsigned int count = 0;
while(tid<nodeNum) {
temp = m_3dArray[tid];
count = bitCount(temp);
atomicAdd(d_index,count);
/*count = 0;
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count= bitCount(temp.x);
count+= bitCount(temp.y);
count+= bitCount(temp.z);
count+= bitCount(temp.w);
//atomicAdd(d_output,count);
atomicAdd(&d_index[iy*res+ix],count);
*/
tid += blockDim.x * gridDim.x;
}
}
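// Counts, for every (x,y) ray, how many voxel bits are set in the bound 3D site texture (site_tex)
// and accumulates the counts into d_index; the result is later exclusive-scanned into sites_index.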
__global__ void LDNIDistanceField_CountBitInInteger(unsigned int *d_index, int nodeNum, int res)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
uint4 temp;
int ix,iy,iz;
unsigned int count = 0;
while(tid<nodeNum) {
count = 0;
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count= bitCount(temp.x);
count+= bitCount(temp.y);
count+= bitCount(temp.z);
count+= bitCount(temp.w);
//atomicAdd(d_output,count);
atomicAdd(&d_index[iy*res+ix],count);
tid += blockDim.x * gridDim.x;
}
}
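// For every voxel within 'offsetPixel' of a site along its z ray, rasterize the circular footprint
// (half-chord d = sqrt(offsetPixel^2 - dz^2)) into the neighbouring x columns of the bit volume
// 'bitSites', combining the per-column 32-bit masks with atomicXor.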
__global__ void LDNIDistanceField__FilterProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{prevSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
}
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
}
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=ix-offsetPixel; i<=ix+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicXor(&bitSites[(iz/32)*res*res+iy*res+i], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
__global__ void LDNIDistanceField__FilterProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{currentSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0 && currentSite > 0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel && iz <= currentSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite <= 0)
{
dist2 = abs((int)iz-prevSite);
if(dist2 <= offsetPixel && iz >= prevSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite > 0)
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2 && iz <= prevSite)
//if (dist1 <= dist2 )
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
else if (dist1 > dist2 && iz <= currentSite)
//else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=iy-offsetPixel; i<=iy+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicXor(&bitSites[(iz/32)*res*res+i*res+ix], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
#define LDNIMARKER 1024
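// LDNIMARKER (1024) acts as an "infinite"/empty sentinel for site coordinates (assumes res <= 1024).
// GetSiteByDist walks each (x,y) ray's sorted site list, tracking the nearest site before/after the
// current voxel, and records the site positions as 32-bit masks in sites_off while counting them in
// 'counter'; the distance-band test against 'offdist' is currently commented out.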
__global__ void LDNIDistanceField__GetSiteByDist(ushort3 *sites, unsigned int *counter, unsigned int *sites_index, unsigned int *sites_off, int offdist, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz, st, num, ind,i;
ushort3 current_id, prev_id, temp;
unsigned int dist, bitResult, count;
float2 value;
float off;
while(index<nodeNum) {
iy = index%res;
iz = (index%(chunksize*res)/res)/(chunksize/res);
ix = (index/(chunksize*res))*(chunksize/res)+(index%(chunksize*res)%(chunksize)/res);
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num>0) current_id = sites[st];
prev_id = make_ushort3(LDNIMARKER,LDNIMARKER,LDNIMARKER);
ind = 0;
bitResult = 0;
count = 0;
off = 0.0;
}
if (num > 0)
{
if (iz == current_id.x)
{
prev_id = current_id;
ind++;
if (ind >= num)
current_id = make_ushort3(LDNIMARKER,LDNIMARKER,LDNIMARKER);
else
current_id = sites[st+ind];
bitResult = bitResult | SetBitPos(iz%32);
count++;
//if (ix == 334 && iy == 299 )
//printf("id: %d %d %d %d \n", prev_id.x, prev_id.y, prev_id.z, ind);
}
value.x = sqrt((float)((prev_id.x-iz)*(prev_id.x-iz)+(prev_id.y-ix)*(prev_id.y-ix)+(prev_id.z-iy)*(prev_id.z-iy)));
value.y = sqrt((float)((current_id.x-iz)*(current_id.x-iz)+(current_id.y-ix)*(current_id.y-ix)+(current_id.z-iy)*(current_id.z-iy)));
//if (ix == 334 && iy == 299)
//{
// printf("id: %d %d %d %d %d \n", iz, current_id.x, current_id.y, current_id.z, ind);
//for(i=0; i <num; i++)
//{
// temp = sites[st+i];
// printf("id: %d %d %d %d \n", temp.x, temp.y, temp.z, i);
//}
//}
//dist = (value.x < value.y)? value.x:value.y;
off = (value.x < value.y)? value.x:value.y;
//if (ix == 334 && iy == 299 && iz == 301)
//{
// printf("prev: %d %d %d %d %d\n", prev_id.x, prev_id.y, prev_id.z, st, num);
// printf("curr: %d %d %d \n", current_id.x, current_id.y, current_id.z);
// printf("%f %f %f %d %d %d %d \n", off, value.x, value.y, offdist, ix, iy, iz);
//}
//if (off > offdist && off < offdist+1.0)
//{
/*bitResult = bitResult | SetBitPos(iz%32);
count++;*/
//}
if ((iz+1)%32 == 0)
{
sites_off[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
atomicAdd(counter, count);
}
}
index += blockDim.x * gridDim.x;
}
}
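// SortProbablySite / SortProbablySite2 sort each ray's candidate list in place with a simple O(n^2)
// exchange sort (keyed on GET_STACK and GET_X respectively); lists longer than 256 entries are rejected.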
__global__ void LDNIDistanceField__SortProbablySite2(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix, iy, st, num, temp;
short i,j;
unsigned int tempdepth;
unsigned int depth[256];
while(index<nodeNum) {
st = sites_index[index];
num = sites_index[index+1]-st;
if (num > 0)
{
if (num > 256) { printf("too many num on one thread!!! %d\n", num); return;};
for(i=0;i<num;i++)
{
depth[i]=sites[st+i];
}
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
if (GET_X(depth[i]) > GET_X(depth[j]) ){
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++)
{
sites[st+i]=depth[i];
//if (index == 143640)
// printf("depth %d %d %d \n", GET_X(depth[i]), GET_Y(depth[i]), GET_Z(depth[i]));
}
}
index += blockDim.x * gridDim.x;
}
}
__global__ void LDNIDistanceField__SortProbablySite(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix, iy, st, num, temp;
short i,j;
unsigned int tempdepth;
unsigned int depth[256];
while(index<nodeNum) {
st = sites_index[index];
num = sites_index[index+1]-st;
if (num > 0)
{
if (num > 256) { printf("too many num on one thread!!! %d\n", num); return;};
/*if (506*res + 256 == index)
printf("num %d \n", num);*/
for(i=0;i<num;i++)
{
depth[i]=sites[st+i];
/* if (506*res + 256 == index)
printf("nnnn %d \n", depth[i]);*/
}
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
//f (depth[i].x>depth[j].x) {
if (GET_STACK(depth[i]) > GET_STACK(depth[j]) ){
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++)
{
//if (tz == 250 && tx == 431 )
//if (index == 220922)
// printf("%d %d %d \n", i, GET_STACK(depth[i]), GET_PTR(depth[i]));
sites[st+i]=depth[i];
}
}
/*else
{
printf("no site %d \n", index);
}*/
index += blockDim.x * gridDim.x;
}
}
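// Gathers, for every (x,z) ray, the candidates that survived the y-direction deletion pass: for each
// surviving y position (one bit of a BANDWIDTH-wide band) the closest x-site is taken from the sorted
// x lists and the encoded (y, x) pair is appended to 'sites' through a per-ray atomic counter.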
__global__ void LDNIDistanceField__GetProbablySiteInY(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned short *sites_x, unsigned int *sites_index_x, int3 res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz;
unsigned int bitresult, st_y, num_y;
//short current_id, prev_id, dist;
short middle_id[BANDWIDTH], ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, k, count;
unsigned int st[BANDWIDTH], stack[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index_x[(j+i)*res.x+ix];
num[i] = sites_index_x[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites_x[st[i]];
next_id[i] = sites_x[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites_x[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
bitresult = ~bitDeleted[iy*res.x*res.z+ix*res.x+iz];
//if (__popc(bitresult)>0)
//{
count = 0;
/*if (ix == 32 && iz == 1)
printf("test test %d %d %d %d \n", ix, iy, iz, __popc(bitresult));*/
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0 && GetBitPos(i, bitresult))
{
if (iz < middle_id[i])
{
stack[count] = ENCODE_STACK(iy*BANDWIDTH+i, current_id[i]);
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 65 && iy == 3 && i == 8 )
// printf("test test %d %d %d \n", stack[count], current_id[i], iy*BANDWIDTH+i );
}
else
{
if (ind[i] < num[i])
{
k = sites_x[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + k)/2.0);
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[count] = ENCODE_STACK(iy*BANDWIDTH+i, current_id[i]);
}
count++;
}
}
//if (ix == 32 && iz == 1)
// printf("@@@ %d %d %d %d \n", ix, iy, iz, count);
//if (ix == 256 && iy == 5 && iz == 508)
// printf("test test %d %d \n", count, st_y);
st_y = sites_index[ix*res.x+iz];
i = atomicAdd(&counter[ix*res.x+iz],count);
for(j=0; j < count ; j++)
{
sites[st_y+i+j] = stack[j];
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 25 && iz == 250)
// printf("@@ %d %d %d %d \n", j, i, GET_STACK(stack[j]), GET_PTR(stack[j]));
}
//}
/*if (iz == 0)
{
st = sites_index_x[iy*res+ix];
num = sites_index_x[iy*res+ix+1]-st;
if (num>0) current_id = sites_x[st];
prev_id = LDNIMARKER;
ind = 0;
}
if (num > 0)
{
if (iz%32 == 0)
{
value = bitDeleted[(iz/32)*res*res+iy*res+ix];
}
if (iz == current_id)
{
prev_id = current_id;
ind++;
if (ind >= num)
current_id = LDNIMARKER;
else
current_id = sites_x[st+ind];
}
if (!GetBitPos(iz%32, value))
{
dist = (abs((int)(prev_id-iz)) < abs((int)(current_id-iz)))? prev_id:current_id;
st_y = sites_index[ix*res+iz];
i = atomicAdd(&counter[ix*res+iz],1);
sites[st_y+i] = make_ushort2(iy, dist);
}
}*/
tid += blockDim.x * gridDim.x;
}
}
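// Same gathering step as above but along x: surviving candidates are re-encoded as (y, stack, ptr)
// triples with ENCODE_STACK_3, using middlepointY to advance along each band's sorted list.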
__global__ void LDNIDistanceField__GetProbablySiteInX(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned int *sites_in, unsigned int *sites_index_in, int3 res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz;
unsigned int bitresult, st_y, num_y;
unsigned int current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], ind[BANDWIDTH], i;
int j, k, count, temp;
unsigned int st[BANDWIDTH], stack[BANDWIDTH];
int middle_id[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index_in[(j+i)*res.x+ix];
num[i] = sites_index_in[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites_in[st[i]];
next_id[i] = sites_in[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites_in[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
bitresult = ~bitDeleted[iy*res.x*res.z+ix*res.x+iz];
//if (__popc(bitresult)>0)
//{
count = 0;
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
if ((int)iz < middle_id[i])
{
if (GetBitPos(i, bitresult))
{
stack[count] = ENCODE_STACK_3(iy*BANDWIDTH+i, GET_STACK(current_id[i]), GET_PTR(current_id[i]));
count++;
}
}
else
{
if (ind[i] < num[i])
{
j = sites_in[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites_in[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos(i, bitresult))
{
stack[count] = ENCODE_STACK_3(iy*BANDWIDTH+i, GET_STACK(current_id[i]), GET_PTR(current_id[i]));
count++;
/*if (ix == 311 && iz == 256 && iy == 3 )
{
printf("middle %d %d %d %d %d %d %d %d %d %d\n", count, i,iy*BANDWIDTH+i, bitresult, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), GET_X(stack[]) );
}*/
}
}
//if (ix == 311 && iy == 9 && i == 0)
//{
//for(int test = 0; test < num[i]; test++)
//for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites_in[st[i]]+test, sites_in[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites_in[st[i]+test]), GET_PTR(sites_in[st[i]+test]));
//printf("%d %d %d \n", num[i], GET_STACK(sites_in[st[i]+test]), GET_PTR(sites_in[st[i]+test]));
//printf("%d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]));
//}
}
}
//if (ix == 32 && iz == 1)
// printf("@@@ %d %d %d %d \n", ix, iy, iz, count);
//if (ix == 256 && iy == 5 && iz == 508)
// printf("test test %d %d \n", count, st_y);
st_y = sites_index[ix*res.x+iz];
i = atomicAdd(&counter[ix*res.x+iz],count);
for(j=0; j < count ; j++)
{
sites[st_y+i+j] = stack[j];
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 280 && iz == 280 && iy < 6)
// printf("@@ %d %d %d %d %d %d %d\n", bitresult, iy, j, i, GET_X(stack[j]), GET_Y(stack[j]), GET_Z(stack[j]));
//if (GET_X(stack[j]) == 25 && GET_Y(stack[j]) == 329 && GET_Z(stack[j]) == 293)
// printf("?? %d %d %d \n", ix, iy, iz);
}
tid += blockDim.x * gridDim.x;
}
}
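// Counts the surviving (not-deleted) candidates of every ray by popcounting the complemented
// bitDeleted masks and accumulates the totals into 'counter'.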
__global__ void LDNIDistanceField__CountProbablySiteInY(unsigned int *bitDeleted, unsigned int *counter, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz,count=0,value, st, num;
short i;
while(index<nodeNum) {
ix = index%res;
iy = index/res;
count = 0;
for (i = 0; i < res/32; i++)
{
value = ~bitDeleted[i*res*res+iy*res+ix];
count += __popc(value);
}
///if (ix == 0 && iy < 32)
// printf("no site !!! %d %d %d\n", ix, iy, count);
atomicAdd(&counter[iy*res+ix],count);
index += blockDim.x * gridDim.x;
}
}
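// Loop pass of the dominance test in x (ushort2 sites): rows iy-1, iy and iy+loopID are compared
// through interpointY on squared (z,x) distances; dominated configurations are OR-ed into bitDeleted
// while bitForNextLoop carries the candidates for the next loopID.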
__global__ void LDNIDistanceField__GenerateProbablySiteInXLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
ushort2 current_id[3];
short ind[3];
ushort2 prev_id[3];
unsigned int st[3], num[3], bitResult, bitCheck;
float x1, x2;
ushort2 p[3];
unsigned int count;
while(index<nodeNum) {
iy = index%res;
iz = (index%(chunksize*res)/res)/(chunksize/res);
ix = (index/(chunksize*res))*(chunksize/res)+(index%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
if ((iy+loopID) < res)
{
st[2] = sites_index[(iy+loopID)*res+ix];
num[2] = sites_index[(iy+loopID)*res+ix+1]-st[2];
}
else
{
st[2] = 0;
num[2] = 0;
}
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
//if (ix == 26 && iy == 25)
// printf("%d %d %d %d %d %d \n", num[0], num[1], num[2], current_id[0], current_id[1], current_id[2]);
prev_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy-1
prev_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy
prev_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy+loopID
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
bitCheck = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz%32 == 0)
{
bitCheck = bitForNextLoop[(iz/32)*res*res+iy*res+ix];
}
//if (ix == 125 && iy == 256)
// printf("%d %d %d %d\n", iz, bitCheck,GetBitPos(iz%32, bitCheck) );
if (iz != current_id[1].x)
{
if ( GetBitPos(iz%32, bitCheck))
{
p[0] = ((prev_id[0].x-iz)*(prev_id[0].x-iz)+(prev_id[0].y-ix)*(prev_id[0].y-ix) < (current_id[0].x-iz)*(current_id[0].x-iz)+(current_id[0].y-ix)*(current_id[0].y-ix))? prev_id[0]:current_id[0];
p[1] = ((prev_id[1].x-iz)*(prev_id[1].x-iz)+(prev_id[1].y-ix)*(prev_id[1].y-ix) < (current_id[1].x-iz)*(current_id[1].x-iz)+(current_id[1].y-ix)*(current_id[1].y-ix))? prev_id[1]:current_id[1];
p[2] = ((prev_id[2].x-iz)*(prev_id[2].x-iz)+(prev_id[2].y-ix)*(prev_id[2].y-ix) < (current_id[2].x-iz)*(current_id[2].x-iz)+(current_id[2].y-ix)*(current_id[2].y-ix))? prev_id[2]:current_id[2];
x1 = interpointY(iy-1, p[0].x, p[0].y, iy, p[1].x, p[1].y, ix, iz) ;
x2 = interpointY(iy, p[1].x, p[1].y, iy+loopID, p[2].x, p[2].y, ix, iz) ;
if (x1 >= x2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0].x)
{
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2].x)
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
atomicOr(&bitDeleted[(iz/32)*res*res+(iy+loopID-1)*res+ix], bitResult);
bitResult = 0;
}
if (iz == res-1)
{
atomicAdd(counter, count);
}
}
}
index += blockDim.x * gridDim.x;
}
}
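// Loop pass of the dominance test in y: the nearest sites of rows iy-1, iy and iy+loopID are compared
// through interpointY (intersections of perpendicular bisectors with the current column); when the
// first intersection does not lie below the second the middle site is dominated, the bit is OR-ed
// into bitDeleted and bitForNextLoop drives the next iteration.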
__global__ void LDNIDistanceField__GenerateProbablySiteInYLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short current_id[3], ind[3];
short prev_id[3];
unsigned int st[3], num[3], bitResult, bitCheck;
float y1, y2;
short z[3];
unsigned int count;
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
if ((iy+loopID) < res)
{
st[2] = sites_index[(iy+loopID)*res+ix];
num[2] = sites_index[(iy+loopID)*res+ix+1]-st[2];
}
else
{
st[2] = 0;
num[2] = 0;
}
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
//if (ix == 26 && iy == 25)
// printf("%d %d %d %d %d %d \n", num[0], num[1], num[2], current_id[0], current_id[1], current_id[2]);
prev_id[0] = LDNIMARKER; //iy-1
prev_id[1] = LDNIMARKER; //iy
prev_id[2] = LDNIMARKER; //iy+loopID
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
bitCheck = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0 )//&& ix == 125 && ((iy <= 252 && iy>=200)))
{
if (iz%32 == 0)
{
bitCheck = bitForNextLoop[(iz/32)*res*res+iy*res+ix];
//if (ix == 26 && iy == 25)
// printf("%d %d \n", iz, bitCheck);
}
//if (ix == 125 && iy == 256)
// printf("%d %d %d %d\n", iz, bitCheck,GetBitPos(iz%32, bitCheck) );
if (iz != current_id[1])
{
if ( GetBitPos(iz%32, bitCheck))
{
z[0] = (abs((int)(prev_id[0]-iz)) < abs((int)(current_id[0]-iz)))? prev_id[0]:current_id[0];
z[1] = (abs((int)(prev_id[1]-iz)) < abs((int)(current_id[1]-iz)))? prev_id[1]:current_id[1];
z[2] = (abs((int)(prev_id[2]-iz)) < abs((int)(current_id[2]-iz)))? prev_id[2]:current_id[2];
y1 = interpointY(ix, iy-1, z[0], ix, iy, z[1], ix, iz) ;
y2 = interpointY(ix, iy, z[1], ix, iy+loopID, z[2], ix, iz) ;
if (y1 >= y2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
/*if (ix == 26 && iy == 25)
{
printf("%d %d %d %d %f %f %d\n", iz, z[0], z[1], z[2], y1, y2, count);
printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
}*/
if (ix == 125 && iy == 251 && iz == 211)
{
printf("%d %d %d %d %d %f %f %d\n", iy, iz, z[0], z[1], z[2], y1, y2, count);
//printf("a) %d %d %d %d %d %d %d %d \n",ix, iy-1, z[0], ix, iy, z[1], ix, iz);
//printf("b) %d %d %d %d %d %d %d %d \n",ix, iy, z[1], ix, iy+loopID, z[2], ix, iz);
//printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
}
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = LDNIMARKER;
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0])
{
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = LDNIMARKER;
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2])
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = LDNIMARKER;
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
atomicOr(&bitDeleted[(iz/32)*res*res+(iy+loopID-1)*res+ix], bitResult);
bitResult = 0;
}
if (iz == res-1)
{
//if (count > 0)
// printf("%d %d %d\n", ix, iy, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
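// Single-step (loopID == 1) variant of the dominance test in x, comparing rows iy-1, iy and iy+1 on
// ushort2 (z,x) sites and writing the result to both bitDeleted and bitForNextLoop.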
__global__ void LDNIDistanceField__GenerateProbablySiteInX(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
ushort2 current_id[3];
short ind[3];
ushort2 prev_id[3];
unsigned int st[3], num[3], bitResult;
float x1, x2;
ushort2 p[3];
int count=0;
while(tid<nodeNum) {
iy = tid%res; // x axis
iz = (tid%(chunksize*res)/res)/(chunksize/res); // y axis
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res); // z axis
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
st[2] = sites_index[(iy+1)*res+ix];
num[2] = sites_index[(iy+1)*res+ix+1]-st[2];
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
prev_id[0] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy-1
prev_id[1] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy
prev_id[2] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy+1
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz != current_id[1].x)
{
p[0] = ((prev_id[0].x-iz)*(prev_id[0].x-iz)+(prev_id[0].y-ix)*(prev_id[0].y-ix) < (current_id[0].x-iz)*(current_id[0].x-iz)+(current_id[0].y-ix)*(current_id[0].y-ix))? prev_id[0]:current_id[0];
p[1] = ((prev_id[1].x-iz)*(prev_id[1].x-iz)+(prev_id[1].y-ix)*(prev_id[1].y-ix) < (current_id[1].x-iz)*(current_id[1].x-iz)+(current_id[1].y-ix)*(current_id[1].y-ix))? prev_id[1]:current_id[1];
p[2] = ((prev_id[2].x-iz)*(prev_id[2].x-iz)+(prev_id[2].y-ix)*(prev_id[2].y-ix) < (current_id[2].x-iz)*(current_id[2].x-iz)+(current_id[2].y-ix)*(current_id[2].y-ix))? prev_id[2]:current_id[2];
x1 = interpointY(iy-1, p[0].x, p[0].y, iy, p[1].x, p[1].y, ix, iz) ;
x2 = interpointY(iy, p[1].x, p[1].y, iy+1, p[2].x, p[2].y, ix, iz) ;
if (x1 >= x2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0].x)
{
//if (ix == 125 && iy == 256)
// printf("--------------\n");
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2].x)
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
bitDeleted[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
//if (iy==256)
// printf("count %d %d \n", ix, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
//#define ENCODE_16BIT(a, b) (((a) << 8) | (b))
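// Warp-level band merge in y: each thread keeps 'bandNum' bands in registers, builds for every band a
// stack entry (closest site in z plus a pointer to the previous surviving band), exchanges band
// boundaries through warp shuffles (__shfl / __ballot), and iteratively deletes sites whose bisector
// intersections (y_inter) overlap; the surviving mask is written back to bitDeleted.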
__global__ void LDNIDistanceField__kernelMergeBandsY_32(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[32], ind[32], current_id[32], next_id[32];
float y_inter[32];
short num[32], i, j, k, count;
unsigned int st[32];
unsigned int stack[32]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to other thread
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
//mask = 65535 << bandNum*(iy%warpWidth); // 65535 = 0x0000ffff
mask = bitresult;// & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** can be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure all threads have obtained a valid last-site index
if (k >= warpSize) break; // safety guard, should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (ix == 206 && iz == 300 )
{
printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (ix == 206 && iz == 300 )
{
printf("2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
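//--------------------------------------------------------------------------------------------
// Band-merge along Y, specialised for bandNum = 16 (each thread appears to own 16 consecutive
// rows, so two threads share one 32-bit word of bitDeleted). The kernel rebuilds the closest
// site of every surviving row from sites/sites_index, chains rows together through the
// previous-site pointer packed by ENCODE_STACK, and then repeatedly deletes rows whose
// interpointY ordering shows they can never be the nearest site, exchanging stacks and
// intersection values across the warp with __shfl/__ballot. The surviving rows are written
// back to bitDeleted as an inverted bit mask.
//--------------------------------------------------------------------------------------------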
__global__ void LDNIDistanceField__kernelMergeBandsY_16(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[16], ind[16], current_id[16], next_id[16];
float y_inter[16];
short num[16], i, j, k, count;
unsigned int st[16];
unsigned int stack[16]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 65535 << bandNum*(iy%warpWidth); // 65535 = 0x0000ffff
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("16-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("16-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
//if (ix == 503 && iz == 64)
// printf("-- %d %d \n", iy/warpWidth, ~bitresult);
}
tid += chunksize;
}
}
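//--------------------------------------------------------------------------------------------
// Same Y band-merge as above, specialised for bandNum = 8 (mask constant 0x000000ff); only the
// band width and the per-thread array sizes differ from the 16-wide variant.
//--------------------------------------------------------------------------------------------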
__global__ void LDNIDistanceField__kernelMergeBandsY_8(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[8], ind[8], current_id[8], next_id[8];
float y_inter[8];
short num[8], i, j, k, count;
unsigned int st[8];
unsigned int stack[8]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 255 << bandNum*(iy%warpWidth); // 255 = 0x000000ff
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("8-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("8-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
//if (test > 40) break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
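//--------------------------------------------------------------------------------------------
// Y band-merge specialised for bandNum = 4 (mask constant 0x0000000f); the logic mirrors the
// wider variants above.
//--------------------------------------------------------------------------------------------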
__global__ void LDNIDistanceField__kernelMergeBandsY_4(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[4], ind[4], current_id[4], next_id[4];
float y_inter[4];
short num[4], i, j, k, count;
unsigned int st[4];
unsigned int stack[4]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 15 << bandNum*(iy%warpWidth); // 15 = 0x0000000f
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (iz == 1 && ix == 32 && (iy*bandNum+i)==326)
{
printf("4=! %d %d %d %d %d %f %d \n", i, iy , iy%32, lasty, bitresult, y1, (lasty%(warpSize*bandNum))/(bandNum));
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("4-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("4-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
//if (iz == 1 && ix == 32)
//{
// printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
//}
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
//if (iz == 1 && ix == 32)
//{
// printf("5=! %d %d %d %d %f\n", j, iy , iy%32, count, y_inter[j%bandNum]);
//}
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
//if (iz == 1 && ix == 32 && (i >= 300 && i <= 304))
// printf("7=! %d %d %d %d %f\n", i, iy , iy%32, count, y_inter[i%bandNum]);
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
//if (test > 40) break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
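//--------------------------------------------------------------------------------------------
// Y band-merge specialised for bandNum = 2 (mask constant 0x3). A large block of an earlier
// implementation is kept commented out near the end of the kernel.
//--------------------------------------------------------------------------------------------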
__global__ void LDNIDistanceField__kernelMergeBandsY_2(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[2], ind[2], current_id[2], next_id[2];
float y_inter[2];
short num[2], i, j, k, count;
unsigned int st[2];
unsigned int stack[2]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
//if (ix == 65 && j+i == 104 )
// printf(" %d %d %d %d %d %d\n", iz, num[i], stack[i], current_id[i], middle_id[i], sites[st[i]]);
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%2)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 3 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (bitCount(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
//if (iz == 250 && ix == 431 && (iy*bandNum+i) == 96)
//{
// printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431 )
//{
// printf("2-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("2-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
/*
while (mask != 0)
{
lasty = __ffs(mask);
count = 0;
for(i=0; i < bandNum ; i++)
{
j = GET_PTR(stack[i]);
if ( j > 0)
{
lasty = __shfl(y_inter[j%2], (j%(warpSize*bandNum))/(bandNum));
if (y_inter[i] < lasty)
{
j = GET_PTR(lasty);
lasty = __shfl((int)stack[j%2], (j%(warpSize*bandNum))/(bandNum));
y_inter[i] = interpointY(ix, j, GET_STACK(lasty), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
lasty = GET_STACK(stack[i]);
stack[i] = ENCODE_STACK(lasty, k);
count++;
break;
}
}
}
mask = __ballot(count > 0);
}
//------------------------------------------------------
// Store the result
bitresult = 0;
lasty = -1;
if (iy%(warpSize/bandNum) == 0)
{
k = (iy/(warpSize/bandNum))%(bandNum);
lasty = -1;
for(j=warpSize/bandNum-1; j > 0 ; j--)
{
for(i=bandNum; i > 0 ; i--)
{
mask = __shfl((int)stack[i], k*(warpSize/bandNum)+j);
lasty = GET_PTR(mask);
if (lasty >= 0)
{
bitresult = bitresult | SetBitPos(j*bandNum+i);
break;
}
}
if (lasty >= 0) break;
}
while (lasty >= 0)
{
j = lasty%(warpSize*bandNum)/bandNum;
if (j/(warpSize/bandNum) != k) break;
mask = __shfl((int)stack[lasty%2], j);
lasty = GET_PTR(mask);
if (lasty > 0)
{
bitresult = bitresult | SetBitPos(j*bandNum+(lasty%bandNum));
}
}
if (k+1 < bandNum)
{
lasty = __shfl(lasty, (k+1)*(warpSize/bandNum));
if (lasty > 0)
{
bitresult = bitresult & (!(SetBitPos(lasty%32)-1));
}
}
bitDeleted[(iy/(warpSize/bandNum))*res.x*res.z+ix*res.x+iz] = bitresult;
}*/
tid += chunksize;
}
}
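//--------------------------------------------------------------------------------------------
// First pass of the Y sweep: for each band of BANDWIDTH rows the kernel builds the stack of
// closest sites along Z and prunes it with the classic three-site elimination test
// (interpointY on consecutive stack entries), as in Maurer's distance-transform scan.
// Deleted rows are stored as a bit mask in bitDeleted; the kernelMergeBandsY_* kernels above
// presumably merge the resulting bands afterwards.
//--------------------------------------------------------------------------------------------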
__global__ void LDNIDistanceField__MaurerAxisInY(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[BANDWIDTH], ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, k;
unsigned int st[BANDWIDTH];
short stack[BANDWIDTH], count;
//unsigned int bitresult[BANDWIDTH];
unsigned int bitresult;
float y1, y2;
short ptr[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
count = 0;
k = -1;
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
ptr[i] = k;
if (iz < middle_id[i])
stack[i] = current_id[i];
else
{
if (ind[i] < num[i])
{
k = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + k)/2.0);
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = current_id[i];
}
count++;
k = i;
}
else
{
stack[i] = -1;
ptr[i] = k;
bitresult = bitresult | SetBitPos(i);
}
//if (iz == 250 && ix == 431 && iy == 3)
/*if (ix == 65 && iy == 3 && i == 8 )
printf(" %d %d %d %d %d %d\n", iz, num[i], stack[i], current_id[i], middle_id[i], sites[st[i]]);*/
}
if (count > 2)
{
k=0;
for(i=0; i < BANDWIDTH ; i++)
{
if (stack[i] > 0)
{
if (k < 2)
{
k++;
continue;
}
while (k>=2)
{
y1 = interpointY(ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz) ;
y2 = interpointY(ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz) ;
if (y1 < y2)
break;
//if (iz == 250 && ix == 431 && iy < 4)
//{
// printf("ptr %d %f %f %d %d %d\n", j+i, y1, y2, k, j+ptr[i], stack[ptr[i]]);
//printf("y1 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz);
//printf("y2 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz);
//}
//if (ix == 256 && (j+i) == 178 && iz == 0)
k--;
stack[ptr[i]] = -1;
bitresult = bitresult | SetBitPos(ptr[i]);
ptr[i] = ptr[ptr[i]];
}
k++;
}
}
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (iz == 250 && ix == 431 && iy < 4)
//if (ix == 256 && iz ==0)
// printf("--------------%d %d \n", iy, bitresult, count );
//for(i=0; i < BANDWIDTH ; i++)
//{
// bitDeleted[iy*res*res+ix*res+iz]
//}
}
else
{
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d %d\n", iy, bitresult, count );
}
tid += blockDim.x * gridDim.x;
}
}
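//--------------------------------------------------------------------------------------------
// Band-merge along X, bandNum = 32 (one thread per 32-row band, so each bitDeleted word
// belongs to a single thread). The sites array holds packed unsigned ints here, so each stack
// entry appears to carry two site coordinates plus the previous-site pointer via
// ENCODE_STACK_3 / GET_X / GET_Y / GET_Z, and middlepointY() replaces the simple midpoint used
// in the Y sweep. Otherwise the deletion loop follows the same pattern as the Y kernels.
//--------------------------------------------------------------------------------------------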
__global__ void LDNIDistanceField__kernelMergeBandsX_32(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[32], next_id[32]; // all per-band arrays sized for bandNum = 32
float y_inter[32];
short ind[32], num[32], i;
int j, k, count;
unsigned int st[32];
int stack[32]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[32], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
//mask = 65535 << bandNum*(iy%warpWidth);
mask = bitresult;// & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 311 && ix == 256 )
//{
//	printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 311 && ix == 256 )
//{
//	printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
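//--------------------------------------------------------------------------------------------
// X band-merge specialised for bandNum = 16; identical in structure to the 32-wide X variant
// above apart from the band width and mask constant.
//--------------------------------------------------------------------------------------------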
__global__ void LDNIDistanceField__kernelMergeBandsX_16(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[16], next_id[16];
float y_inter[16];
short ind[16], num[16], i;
int j, k, count;
unsigned int st[16];
int stack[16]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[16], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 65535 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // loop until every thread has obtained a valid lasty from the preceding lanes
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
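//--------------------------------------------------------------------------------------------
// X band-merge specialised for bandNum = 8; same structure as the wider X variants above.
//--------------------------------------------------------------------------------------------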
__global__ void LDNIDistanceField__kernelMergeBandsX_8(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[8], next_id[8];
float y_inter[8];
short ind[8], num[8], i;
int j, k, count;
unsigned int st[8];
int stack[8]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[8], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 255 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // get the most significant set bit (__ffs returns 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
			mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty
			if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
			mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // fetch the last stack entry from the thread that owns the previous site
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
				if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // previous site is handled by the same thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
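//--------------------------------------------------------------------------------------
// Same band-merging pass as above, presumably specialised for bandNum == 4: only the
// per-thread array sizes and the 4-bit band mask (15) differ from the 8-row variant.
//--------------------------------------------------------------------------------------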
__global__ void LDNIDistanceField__kernelMergeBandsX_4(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[4], next_id[4];
float y_inter[4];
short ind[4], num[4], i;
int j, k, count;
unsigned int st[4];
	int stack[4];	// packed site coordinates + pointer to the previous site
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[4], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
		// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 15 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
			lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask)));	// index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
		//link the last bit for each band (could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
			mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty
			if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
			mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // fetch the last stack entry from the thread that owns the previous site
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
				if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // previous site is handled by the same thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
/*if (iz == 280 && ix == 280 && iy == 30 )
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
/*if (iz == 280 && ix == 280 && iy == 30 )
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
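//--------------------------------------------------------------------------------------
// Band-merging pass for bandNum == 2: per-thread arrays of size 2 and a 2-bit band mask
// (3); otherwise the logic appears identical to the 8- and 4-row variants above.
//--------------------------------------------------------------------------------------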
__global__ void LDNIDistanceField__kernelMergeBandsX_2(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[2], next_id[2];
float y_inter[2];
short ind[2], num[2], i;
int j, k, count;
unsigned int st[2];
	int stack[2];	// packed site coordinates + pointer to the previous site
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[2], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
/*if (iz > 280)
{
tid += chunksize;
continue;
}*/
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
		// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%2)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 3 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (bitCount(mask) > 0)
{
			lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask)));	// index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
		//link the last bit for each band (could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
			mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty
			if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
					k = current_id[i]; // keep a record of the last current_id
				}
				else
					stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // rows with no remaining site reuse the last recorded site
}
//if (iz == 280 && ix == 280 && iy*bandNum+i == 64)
//{
// printf("^^^^ %d %d %d %d %d %d %d %d %d %d %d\n",bitresult, iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i], GET_X(stack[i]), GET_Y(stack[i]), GET_Z(stack[i]) );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
//}
//else
//{
// stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
//}
}
//for(i=0; i < bandNum ; i++)
//{
// if (GetBitPos((iy*bandNum+i)%32, bitresult))
// {
// if ((int)iz < middle_id[i])
// stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
//
// else
// {
// if (ind[i] < num[i])
// {
// j = sites[st[i]+ind[i]];
// ind[i]++;
// temp = middlepointY(next_id[i], j, ix);
// while (temp <= middle_id[i])
// {
// next_id[i] = j;
// j = sites[st[i]+ind[i]];
// ind[i]++;
// temp = middlepointY(next_id[i], j, ix);
//
// }
//
// middle_id[i] = temp;
// current_id[i] = next_id[i];
// next_id[i] = j;
// }
// else
// {
// middle_id[i] = LDNIMARKER;
// current_id[i] = next_id[i];
// }
// stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
//
//
// }
// count++;
//
// //if (ix == 250 && iy*bandNum+i == 78)
// //{
// // printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
// //for(int test = 0; test < num[i]; test++)
// // printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
// //printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
// //}
//
// lasty = iy*bandNum + i;
// k = current_id[i]; // keep record the last current_id
//
//
//
// }
// else
// {
// stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
// }
//
//}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
			mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // fetch the last stack entry from the thread that owns the previous site
/*if (ix == 280 && iz == 280 && iy*bandNum+i==116)
{
//printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
printf("%d %d %d \n", lasty, mask,GetBitPos((iy*bandNum+i)%32, bitresult) );
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
				if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // previous site is handled by the same thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (ix == 280 && iz == 280 && iy*bandNum+i==116)
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-d=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
y_inter[i] = y1;
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-c=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (iz == 111 && ix == 250 )
{
printf("bitresult ! %d %d %d \n", iy , iy%32, bitresult);
}*/
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
//if (iz == 250 && ix == 431 && (iy*bandNum+i) == 96)
//{
// printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 280 && ix == 280 && (iy*bandNum+i)>=100 && (iy*bandNum+i)< 125)
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 280 && ix == 280 && (iy*bandNum+i)>=100 && (iy*bandNum+i)< 125)
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
//stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
//y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48))
{
printf("tset %f %d \n %d %d %d %d %d %d %d %d\n", y_inter[j%bandNum], j, GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix);
}*/
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
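//--------------------------------------------------------------------------------------
// Maurer-style elimination over one band of BANDWIDTH y-rows: for every (x, z) column
// the thread appears to build a linked stack of candidate sites, pop those whose
// intersection ordinates (interpointY) show they can never become the nearest site, and
// store the resulting 32-bit deletion mask of the band in bitDeleted.
//--------------------------------------------------------------------------------------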
__global__ void LDNIDistanceField__MaurerAxisInX(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, count;
unsigned int st[BANDWIDTH];
unsigned int stack[BANDWIDTH];
unsigned int bitresult;
float y1, y2;
short ptr[BANDWIDTH];
int middle_id[BANDWIDTH], k, temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
/*if (GET_STACK(sites[st[i]]) == 330 && GET_PTR(sites[st[i]]) == 291)
{
printf(" ^^ %d %d %d %d %d \n", ix, i, iz, GET_STACK(sites[st[i]]), GET_PTR(sites[st[i]]));
}*/
//if (iy == 1 && iz == 0 && ix == 1)
//{
// printf("how ? %d %d %d %d %d %d %d \n",num[i], GET_STACK(sites[st[i]]), GET_PTR(sites[st[i]]), GET_STACK(sites[st[i]+1]), GET_PTR(sites[st[i]+1]), GET_STACK(sites[st[i]+2]), GET_PTR(sites[st[i]+2]) );
//}
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
//middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (iz == 25 && ix == 250)
// printf("num %d %d %d %d %d %d\n", num[i], current_id[i], next_id[i], j+i, middle_id[i]);
}
}
count = 0;
k = -1;
temp = 0;
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
ptr[i] = k;
if ((int)iz < middle_id[i])
stack[i] = current_id[i];
else
{
if (ind[i] < num[i])
{
k = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], k, ix);
while (temp <= middle_id[i])
{
next_id[i] = k;
k = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], k, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = current_id[i];
//if ( ix == 250 && iy == 2 && i == 14)
//{
// printf("stack %d %d %d %d %d %d\n", k, ind[i], iz, num[i], middle_id[i], temp);
//// printf("test~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
//}
}
count++;
/*if ( ix == 250 && iy == 2 && i == 14)
{
printf("stack %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), ind[i]);
}*/
//if (iy == 13 && i == 15 && ix == 250 )
//if ( ix == 250 && iy == 2 && i == 1 && iz == 0)
//if (iy == 0 && ix == 250 && i == 25)
/*if (ix == 250 && iy == 2 && i == 14 && iz == 111)
{
printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
for(int test = 0; test < num[i] ; test++)
printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
}*/
k = i;
/*if ( ix == 250 && iy == 3)
{
printf("stack %d %d %d %d %d %d\n", k, ind[i], iz, num[i], middle_id[i], temp);
}*/
}
else
{
stack[i] = -1;
ptr[i] = k;
bitresult = bitresult | SetBitPos(i);
}
//printf("test test? %d %d %d %d %d %d %d \n", ptr[i], i, GET_STACK(current_id[i]),GET_PTR(current_id[i]), middle_id[i] , GET_STACK(next_id[i]),GET_PTR(next_id[i]));
}
if (count > 2)
{
k=0;
for(i=0; i < BANDWIDTH ; i++)
{
//if (iy == 0 && iz ==0 && ix == 0)
// printf("test test %d %d \n", count, stack[i] );
if (GET_PTR(stack[i]) < res.x || GET_STACK(stack[i]) < res.x)
{
if (k < 2)
{
k++;
continue;
}
while (k>=2)
{
//y1 = interpointY(ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz) ;
//y2 = interpointY(ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz) ;
//y1 = interpointY(GET_PTR(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_STACK(stack[ptr[ptr[i]]]), GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), ix, iz) ;
//y2 = interpointY(GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_PTR(stack[i]), j+i, GET_STACK(stack[i]), ix, iz) ;
y1 = interpointY(GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), iz, ix) ;
y2 = interpointY(GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), iz, ix) ;
//if (ix == 256 && (j+i) == 178 && iz == 0)
//{
// printf("ptr %d %f %f %d %d %d\n", j+i, y1, y2, k, j, i);
// printf("y1 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz);
// printf("y2 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz);
//}
/*if (iy == 1 && iz == 0 && ix == 1)
{
printf("test test? %d %d %f %f %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]) );
printf("y1 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz) ;
printf("y2 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz) ;
}*/
/*if (ix == 250 && j+i == 78 && iz == 111)
{
printf("test test? %d %d %f %f %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]) );
printf("y1 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz) ;
printf("y2 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz) ;
}*/
//if ((j+i) >= 420 && (j+i) <= 440 && iz == 111 && ix == 250)
if (y1 < y2)
break;
//if ((j+i) == 430 && iz == 111 && ix == 250)
/*if (iz == 280 && ix == 280 && (j+i) < 128 && (j+i)>=96)
{
printf("test test? %d %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", bitresult, ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
k--;
stack[ptr[i]] = -1;
bitresult = bitresult | SetBitPos(ptr[i]);
ptr[i] = ptr[ptr[i]];
}
k++;
}
}
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d \n", iy, bitresult, count );
//for(i=0; i < BANDWIDTH ; i++)
//{
// bitDeleted[iy*res*res+ix*res+iz]
//}
}
else
{
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d %d\n", iy, bitresult, count );
}
tid += blockDim.x * gridDim.x;
}
}
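//--------------------------------------------------------------------------------------
// Scans each (x, y) ray along z and compares the nearest sites of rows y-1, y and y+1.
// Voxels where the two intersection ordinates cross (y1 >= y2) are presumably probable
// sites: they are flagged in bitForNextLoop/bitDeleted and counted atomically in *counter.
//--------------------------------------------------------------------------------------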
__global__ void LDNIDistanceField__GenerateProbablySiteInY(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short current_id[3], ind[3];
short prev_id[3];
unsigned int st[3], num[3], bitResult;
float y1, y2;
short z[3];
int count=0;
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
st[2] = sites_index[(iy+1)*res+ix];
num[2] = sites_index[(iy+1)*res+ix+1]-st[2];
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
prev_id[0] = LDNIMARKER; //iy-1
prev_id[1] = LDNIMARKER; //iy
prev_id[2] = LDNIMARKER; //iy+1
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
count = 0;
//if (ix == 125 && iy == 251)
//if (ix == 125 && (iy <=252 && iy>=200))
//{
//printf("%d %d %d \n",num[0], num[1], num[2] );
/*for(int i=0; i<num[0]; i++)
{
printf("sites 0 : %d \n",sites[st[0]+i]);
}*/
//for(int i=0; i<num[1]; i++)
//{
// printf("sites 1 : %d %d %d \n",ix, iy, sites[st[1]+i]);
//}
//printf("------------- \n");
/*for(int i=0; i<num[2]; i++)
{
printf("sites 2 : %d \n",sites[st[2]+i]);
}*/
//}
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz != current_id[1])
{
z[0] = (abs((int)(prev_id[0]-iz)) < abs((int)(current_id[0]-iz)))? prev_id[0]:current_id[0];
z[1] = (abs((int)(prev_id[1]-iz)) < abs((int)(current_id[1]-iz)))? prev_id[1]:current_id[1];
z[2] = (abs((int)(prev_id[2]-iz)) < abs((int)(current_id[2]-iz)))? prev_id[2]:current_id[2];
y1 = interpointY(ix, iy-1, z[0], ix, iy, z[1], ix, iz) ;
y2 = interpointY(ix, iy, z[1], ix, iy+1, z[2], ix, iz) ;
					// debug output for a single voxel (disabled)
					//if (ix == 125 && iy == 251 && iz == 211)
					//{
					//	printf("%d %d %d %d %f %f %d\n", iz, z[0], z[1], z[2], y1, y2, count);
					//	printf("a) %d %d %d %d %d %d %d %d \n",ix, iy-1, z[0], ix, iy, z[1], ix, iz);
					//	printf("b) %d %d %d %d %d %d %d %d \n",ix, iy, z[1], ix, iy+1, z[2], ix, iz);
					//	printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
					//}
if (y1 >= y2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = LDNIMARKER;
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0])
{
//if (ix == 125 && iy == 256)
// printf("--------------\n");
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = LDNIMARKER;
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2])
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = LDNIMARKER;
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
bitDeleted[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
//if (iy==256)
// printf("count %d %d \n", ix, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
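//--------------------------------------------------------------------------------------
// For each (x, y) ray this kernel appears to rasterise, around every site along z, the
// rows that lie within a sphere of radius offsetPixel into a local bit buffer. Note that
// the final scatter of that buffer into bitSites is commented out in this Y variant;
// only the X variant below performs the atomicOr write.
//--------------------------------------------------------------------------------------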
__global__ void LDNIDistanceField__GenerateProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{
prevSite = 0;
}
//{currentSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
//if (prevSite <=0 && currentSite > 0)
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
//if(dist1 <= offsetPixel && iz <= currentSite)
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
/*else if (prevSite > 0 && currentSite <= 0)
{
dist2 = abs((int)iz-prevSite);
if(dist2 <= offsetPixel && iz >= prevSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite > 0)*/
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
//if (dist1 <= dist2 && iz <= prevSite)
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//else if (dist1 > dist2 && iz <= currentSite)
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
/*for(i=iy-offsetPixel; i<=iy+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
//if (buffer[j]!=0 && bitSites[(iz/32)*res*res+i*res+ix]!= buffer[j])
if (buffer[j]!=0)
{
atomicOr(&bitSites[(iz/32)*res*res+i*res+ix], buffer[j] );
}
}*/
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
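//--------------------------------------------------------------------------------------
// X-direction counterpart of the kernel above: the same per-site sphere rasterisation,
// but here the buffered masks are scattered into the neighbouring x columns of bitSites
// with atomicOr.
//--------------------------------------------------------------------------------------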
__global__ void LDNIDistanceField__GenerateProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{prevSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
}
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
}
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=ix-offsetPixel; i<=ix+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicOr(&bitSites[(iz/32)*res*res+iy*res+i], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
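//--------------------------------------------------------------------------------------
// Unpacks the 128 bits of every site_tex texel (uint4, 4 x 32 bits) into explicit site
// depths (iz*128 + bit position) and appends them to d_output, using temp_index as a
// per-pixel atomic write cursor into the prefix-summed layout given by table_index.
//--------------------------------------------------------------------------------------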
__global__ void LDNIDistanceField__writeTexToArray(unsigned short *d_output, int res, unsigned int *table_index, unsigned int* temp_index, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
uint4 temp;
unsigned int num=0,i,st,v,a=0,ind, count = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
st = table_index[iy*res+ix];
num = table_index[iy*res+ix+1]-st;
if (num>0) {
count = bitCount(temp.x);
ind=0;
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.x);
a = 1 << v;
d_output[st+ind+i] = iz*128 + v;
temp.x = temp.x & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.y);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.y);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 32 + v;
temp.y = temp.y & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.z);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.z);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 64 + v;
temp.z = temp.z & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.w);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.w);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 96 + v;
temp.w = temp.w & (~a);
}
}
//if (ix == 512 && iy == 512)
// printf("what %d %d \n", d_output[st], d_output[st+1]);
}
tid += blockDim.x * gridDim.x;
}
}
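//--------------------------------------------------------------------------------------
// Stream compaction: every non-negative entry of d_input is copied into d_output, with
// an atomic counter allocating the output slots (the output order is therefore
// non-deterministic).
//--------------------------------------------------------------------------------------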
__global__ void PBADistanceField__writeCompactArray(int *d_output, int *d_input, unsigned int *counter, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int i;
while(tid<nodeNum) {
if (d_input[tid]> -1)
{
i = atomicAdd(counter, 1);
d_output[i] = d_input[tid];
//if (i == 307076)
// printf("$$$$$ %d %d %d %d %d \n", i, d_input[90000000], GET_X(d_input[90000000]), GET_Y(d_input[90000000]), GET_Z(d_input[90000000]) );
}
tid += blockDim.x * gridDim.x;
}
}
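// Hypothetical usage sketch for the compaction kernel above (buffer names, the launch
// configuration and the plain CUDA runtime calls are assumptions, not taken from this
// file): compact the encoded site ids and read back how many were kept.
#if 0
	unsigned int numSites = 0;
	cudaMemset(d_counter, 0, sizeof(unsigned int));
	PBADistanceField__writeCompactArray<<<64, 256>>>(d_compactSites, d_denseSites, d_counter, res*res*res);
	cudaMemcpy(&numSites, d_counter, sizeof(unsigned int), cudaMemcpyDeviceToHost);
#endif
//--------------------------------------------------------------------------------------
// Expands the packed site texture into a dense int volume for the PBA solver: site
// voxels receive their ENCODE(x, y, z) id, empty voxels receive -1, and the total number
// of sites is accumulated in *counter.
//--------------------------------------------------------------------------------------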
__global__ void PBADistanceField__writeTexToArray(int *d_output, int res, int nodeNum, unsigned int* counter)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
uint4 temp;
unsigned int i,id, count;
unsigned int chunksize = blockDim.x * gridDim.x;
int marker = -1;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
id=0;
count = 0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+i, res);
if (GetBitPos(i,temp.x))
{
d_output[id] = ENCODE(ix, iy, iz*128+i);
//if (ix == 125 && iy == 250)
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+32+i, res);
if (GetBitPos(i,temp.y))
{
d_output[id] = ENCODE(ix, iy, iz*128+32+i);
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+64+i, res);
if (GetBitPos(i,temp.z))
{
d_output[id] = ENCODE(ix, iy, iz*128+64+i);
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+96+i, res);
if (GetBitPos(i,temp.w))
{
d_output[id] = ENCODE(ix, iy, iz*128+96+i);
count++;
}
else
{
d_output[id] = -1;
}
}
atomicAdd(counter, count);
tid += chunksize;
}
}
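//--------------------------------------------------------------------------------------
// Sorts each pixel's depth list (at most 512 entries) in ascending order using a simple
// O(n^2) exchange sort executed entirely within one thread.
//--------------------------------------------------------------------------------------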
__global__ void LDNIDistanceField__Sort2DArray(unsigned short *d_output, unsigned int *d_index, int res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int st,num,i,j;
unsigned short tempdepth;
unsigned short depth[512];
while(tid<nodeNum) {
st = d_index[tid];
num = d_index[tid+1]-st;
if (num > 0)
{
			if (num > 512) { printf("too many sites for one thread: %d\n", num); return; }
for(i=0;i<num;i++) depth[i]=d_output[st+i];
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
if (depth[i]>depth[j]) {
printf("sort need ? %d %d \n", depth[i], depth[j]);
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++) d_output[st+i]=depth[i];
}
tid += blockDim.x * gridDim.x;
}
}
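//--------------------------------------------------------------------------------------
// Debug/visualisation helper: appears to convert every stored ushort2 site of each pixel
// into a world-space float3 point and append it to d_output through the atomic counter.
//--------------------------------------------------------------------------------------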
__global__ void LDNIDistanceField__Test(float3 *d_output, int res, unsigned int *counter, ushort2 *site, unsigned int* site_index, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
//ushort3 temp;
ushort2 temp;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res);
st = site_index[iy*res+ix];
num = site_index[iy*res+ix+1]-st;
if (num > 0)
{
ind = atomicAdd(counter,num);
for(i=0; i < num; i++)
{
temp = site[st+i];
//d_output[ind+i] = make_float3(origin.x+(ww*(temp.x))*width, origin.x+(gw*temp.y), origin.y+(gw*temp.z));
d_output[ind+i] = make_float3(origin.x+(gw*iy), origin.y+(gw*temp.x), origin.z+(gw*temp.y));
}
}
//if (count>0) {
// ind = atomicAdd(counter,count);
// for(i=0; i < count ; i++){
// v = GetFirstBitPos(temp);
// a = 1 << v;
// //d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
// d_output[ind+i] = make_float3(origin.x+(ww*(iz*32+v))*width, origin.x+(gw*ix), origin.y+(gw*iy));
// temp = temp & (~a);
// }
//}
tid += blockDim.x * gridDim.x;
}
}
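//--------------------------------------------------------------------------------------
// Expands a packed bit volume (32 samples per unsigned int along z) into world-space
// float3 points for rendering, appending one point per set bit via the atomic counter.
//--------------------------------------------------------------------------------------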
__global__ void LDNIDistanceField__writeSitesToVBO(float3 *d_output, int res, unsigned int *counter, unsigned int* d_input, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = d_input[iz*res*res+iy*res+ix];
count = bitCount(temp);
if (count>0) {
ind = atomicAdd(counter,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp);
a = 1 << v;
//d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
d_output[ind+i] = make_float3(origin.x+(ww*(iz*32+v))*width, origin.x+(gw*ix), origin.y+(gw*iy));
temp = temp & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
}
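//--------------------------------------------------------------------------------------
// Walks each (x, y) ray's sorted site list along z, tracking the currently nearest site
// through the middlepointX() interval boundaries, and emits the centre of every voxel
// whose integer distance to that site equals offdist -- i.e. one shell of the offset
// surface -- into the VBO.
//--------------------------------------------------------------------------------------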
__global__ void LDNIDistanceField__writeResultToVBO(float3 *d_output, int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num, ind;
unsigned int current_id, next_id;
unsigned int chunksize = blockDim.x * gridDim.x;
int middle_id, k, temp;
double dist = 0.0;
int dx, dy, dz, id;
float ww = 1.0/float(res.x);
float gw = width/float(res.x);
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
st = sites_index[iy*res.x+ix];
num = sites_index[iy*res.x+ix+1]-st;
if (num > 1)
{
current_id = sites[st];
next_id = sites[st+1];
ind = 2;
middle_id = middlepointX(current_id, next_id, ix , iy);
}
else if (num == 1)
{
current_id = sites[st];
ind = 1;
middle_id = LDNIMARKER;
}
else
{
middle_id = -1;
current_id = LDNIMARKER;
next_id= LDNIMARKER;
ind = 0;
}
}
if (num > 0)
{
if (iz < middle_id)
{
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*iz), origin.y+(gw*ix), origin.z+(gw*iy));
}
}
else
{
if (ind < num)
{
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
while (temp <= middle_id || iz >= temp)
{
next_id = k;
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
}
middle_id = temp;
current_id = next_id;
next_id = k;
}
else
{
middle_id = LDNIMARKER;
current_id = next_id;
}
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*iz), origin.y+(gw*ix), origin.z+(gw*iy));
}
}
//if (ix == 256 && iy == 311)
// printf("current %d %d %d %d %d \n", iz, middle_id, GET_X(current_id), GET_Y(current_id), GET_Z(current_id));
}
tid += chunksize;
}
}
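// Counting pass that mirrors LDNIDistanceField__writeResultToVBO: identical ray traversal, but it
// only increments 'counter' so the caller can size the VBO before running the write pass.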
__global__ void LDNIDistanceField__countArrayToVBO(int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num, ind;
unsigned int current_id, next_id;
unsigned int chunksize = blockDim.x * gridDim.x;
int middle_id, k, temp;
double dist = 0.0;
int dx, dy, dz;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
st = sites_index[iy*res.x+ix];
num = sites_index[iy*res.x+ix+1]-st;
if (num > 1)
{
current_id = sites[st];
next_id = sites[st+1];
ind = 2;
middle_id = middlepointX(current_id, next_id, ix , iy);
}
else if (num == 1)
{
current_id = sites[st];
ind = 1;
middle_id = LDNIMARKER;
}
else
{
middle_id = -1;
current_id = LDNIMARKER;
next_id= LDNIMARKER;
ind = 0;
}
}
if (num > 0)
{
if (iz < middle_id)
{
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
}
else
{
if (ind < num)
{
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
while (temp <= middle_id || iz >= temp)
{
next_id = k;
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
}
middle_id = temp;
current_id = next_id;
next_id = k;
}
else
{
middle_id = LDNIMARKER;
current_id = next_id;
}
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
}
}
tid += chunksize;
}
}
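// PBA counting pass: decodes the packed nearest-site coordinates stored per voxel in outputDF and
// counts the voxels whose truncated Euclidean distance to that site equals offdist.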
__global__ void PBADistanceField__countArrayToVBO(int res, unsigned int* counter, int *outputDF, int offdist, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
int dx, dy, dz;
int nx, ny, nz;
int id;
double dist = 0.0;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
// if (ix == 125 && iy == 250)
// printf("dist 0--------------%f %d %d %d \n", dist, ix, iy, iz);
DECODE(outputDF[tid], nx, ny, nz);
//if (ix == 0 && iy == 245 && iz == 231)
// printf("dist 0--------------%d %d %d %d %d %d %d \n", outputDF[tid], nx, ny , nz , ix, iy, iz);
dx = nx - ix; dy = ny - iy; dz = nz - iz;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
tid += blockDim.x * gridDim.x;
}
}
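// PBA write pass: same per-voxel distance test as the counting kernel above, but writes the voxel
// centre of every matching voxel into the mapped VBO; atomicAdd on 'counter' reserves the slots.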
__global__ void PBADistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* counter, int *outputDF, int offdist, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
int dx, dy, dz;
int nx, ny, nz;
int id;
double dist, dist2;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
DECODE(outputDF[tid], nx, ny, nz);
dx = nx - ix; dy = ny - iy; dz = nz - iz;
dist = dx * dx + dy * dy + dz * dz;
dist2 = sqrt(dist);
if (floor(dist2) == offdist )
//if (dist >= offdist && dist <= offdist+1)
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(gw*iz));
}
tid += blockDim.x * gridDim.x;
}
}
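// Expands a bit-packed volume (m_3dArray, 32 voxels per unsigned int along z) into one vertex per
// set bit, using 'table_index' as a global atomic cursor into the VBO.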
__global__ void LDNIDistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* table_index, unsigned int *m_3dArray, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = m_3dArray[tid];
count = bitCount(temp);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
temp = temp & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
}
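// Reads uint4 texels from the 3D texture 'site_tex' (4 x 32 = 128 occupancy bits per texel along z)
// and emits one vertex per set bit of each 32-bit word; the commented block at the end is an older
// 1D-texture variant kept for reference.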
__global__ void LDNIDistanceField__writeTexToVBO(float3 *d_output, int res, int* table_index, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
uint4 temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count = bitCount(temp.x);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.x);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+v))*width);
temp.x = temp.x & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.y);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.y);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+32+v))*width);
temp.y = temp.y & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.z);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.z);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+64+v))*width);
temp.z = temp.z & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.w);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.w);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+96+v))*width);
temp.w = temp.w & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
/*int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
//uint4 temp;
unsigned int value;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
//temp = tex3D(uint_tex3D,ix,iy,iz);
value = tex1D(site_tex, tid);
count = bitCount(value);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(value);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
value = value & (~a);
}
}
tid += blockDim.x * gridDim.x;
}*/
}
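// PBA phase 1, inter-band pass: for the top and bottom slice of every z-band, scan the neighbouring
// bands (backward for the top slice, forward for the bottom one) and overwrite the stored site when
// a closer one is found there.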
__global__ void PBADistanceField_kernelPropagateInterband(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int inc = bandSize * size * size;
int nz, nid, nDist, myDist;
int pixel;
// Top row, look backward
int tz = __mul24(band, bandSize);
int topId = TOID(tx, ty, tz, size);
int bottomId = TOID(tx, ty, tz + bandSize - 1, size);
pixel = tex1Dfetch(pbaTexColor, topId);
nz = GET_Z(pixel);
myDist = abs(nz - tz);
for (nid = bottomId - inc; nid >= 0; nid -= inc) {
pixel = tex1Dfetch(pbaTexColor, nid);
if (pixel != PBAMARKER) {
nz = pixel & 0x3ff;
nDist = abs(nz - tz);
if (nDist < myDist)
output[topId] = pixel;
break;
}
}
// Last row, look downward
tz = tz + bandSize - 1;
pixel = tex1Dfetch(pbaTexColor, bottomId);
nz = GET_Z(pixel);
myDist = abs(nz - tz);
for (int ii = tz + 1, nid = topId + inc; ii < size; ii += bandSize, nid += inc) {
pixel = tex1Dfetch(pbaTexColor, nid);
if (pixel != PBAMARKER) {
nz = pixel & 0x3ff;
nDist = abs(nz - tz);
if (nDist < myDist)
output[bottomId] = pixel;
break;
}
}
}
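// PBA phase 1: 1D flooding along z inside each band. A downward sweep propagates the last site
// seen; the upward sweep then keeps whichever of the two candidates is closer in z.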
__global__ void PBADistanceField_kernelFloodZ(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int tz = band * bandSize;
int plane = size * size;
int id = TOID(tx, ty, tz, size);
int pixel1, pixel2;
pixel1 = PBAMARKER;
// Sweep down
for (int i = 0; i < bandSize; i++, id += plane) {
pixel2 = tex1Dfetch(pbaTexColor, id);
//if (tx == 256 && ty == 132 && tz == 0) printf("1 %d %d %d %d\n", pixel2, tx, ty, tz);
if (pixel2 != PBAMARKER)
pixel1 = pixel2;
output[id] = pixel1;
//if (id == 67840) printf("1 %d %d %d %d\n", pixel1, tx, ty, tz);
}
int dist1, dist2, nz;
id -= plane + plane;
// Sweep up
for (int i = bandSize - 2; i >= 0; i--, id -= plane) {
//if (id == 67840) printf("2 %d \n", pixel1);
nz = GET_Z(pixel1);
//if (id == 67840) printf("3 %d \n", nz);
dist1 = abs(nz - (tz + i));
//if (id == 67840) printf("4 %d \n", dist1);
pixel2 = output[id];
//if (id == 67840) printf("5 %d \n", pixel2);
nz = GET_Z(pixel2);
//if (id == 67840) printf("6 %d \n", nz);
dist2 = abs(nz - (tz + i));
//if (id == 67840) printf("7 %d %d %d\n", dist2, dist1, pixel1);
if (dist2 < dist1)
pixel1 = pixel2;
output[id] = pixel1;
//if (id == 67840) printf("8 %d \n", pixel1);
}
}
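// PBA phase 1: broadcasts the corrected head/tail sites of each band (produced by
// kernelPropagateInterband and read through pbaTexLinks) to the band's interior slices, keeping the
// closest of the original, top and bottom candidates.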
__global__ void PBADistanceField_kernelUpdateVertical(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int tz = band * bandSize;
int id = TOID(tx, ty, tz, size);
int plane = size * size;
int top = tex1Dfetch(pbaTexLinks, id);
int bottom = tex1Dfetch(pbaTexLinks, TOID(tx, ty, tz + bandSize - 1, size));
int topZ = GET_Z(top);
int bottomZ = GET_Z(bottom);
int pixel;
int dist, myDist, nz;
for (int i = 0; i < bandSize; i++, id += plane) {
pixel = tex1Dfetch(pbaTexColor, id);
nz = GET_Z(pixel);
myDist = abs(nz - (tz + i));
dist = abs(topZ - (tz + i));
if (dist < myDist) { myDist = dist; pixel = top; }
dist = abs(bottomZ - (tz + i));
if (dist < myDist) pixel = bottom;
output[id] = pixel;
}
}
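// PBA phase 2: per (x,z) column, builds the stack of proximate sites along y (Maurer's dominance
// test via interpointY). Dominated sites are popped and each stack entry keeps a backward link to
// its predecessor; the commented printf blocks are leftover debugging output.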
__global__ void PBADistanceField_kernelMaurerAxis(int *stack, int size, int mod, int bandSize, int test)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = band * bandSize;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int lastY = INFINITY;
int stackX_1, stackY_1 = INFINITY, stackZ_1, stackX_2, stackY_2 = INFINITY, stackZ_2;
int p = PBAMARKER, nx, ny, nz, s1, s2;
float i1, i2;
for (int i = 0; i < bandSize; i++, ty++) {
p = tex1Dfetch(pbaTexColor, TOID(tx, ty, tz, size));
//if (tx == 1 && (ty < 64 && ty >= 32) && tz == 200 && test == 1)
//if (tz == 250 && ty == 33 && tx <= 512 && test == 1)
//if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
//if (tz == 250 && test == 1 && ty == 25)
//if (tz == 250 && ty == 65 && test == 1)
//if (tz == 250 && tx == 62 && test == 0)
//{
// DECODE(p, nx, ny, nz);
//if (ny == 330 && nz == 291 && test == 1)
// printf("ptr %d %d %d %d %d %d %d\n", tx, ty, tz , nx, ny, nz , i);
//}
//if (tx == 256 && tz == 0 && ty == 132)
//{
// printf("ptr %d %d %d\n", ty, p, TOID(tx, ty, tz, size ));
//printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
//printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
//}
if (p != PBAMARKER) {
while (stackY_2 != INFINITY) {
DECODE(s1, stackX_1, stackY_1, stackZ_1);
DECODE(s2, stackX_2, stackY_2, stackZ_2);
i1 = interpointY(stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz);
DECODE(p, nx, ny, nz);
i2 = interpointY(stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz);
/*if (tx == 256 && tz == 0 && ty == 132)
{
printf("ptr %d %f %f %d %d\n", ty, i1, i2, i, lastY);
printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
}*/
//if (tx == 1 && (ty < 64 && ty >= 32) && tz == 0 && test == 1)
//{
//printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
//printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
//printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
//}
/*if (tz == 250 && (ty >= 416 && ty < 448) && tx == 0 && test == 1)
{
printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
}*/
/*if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
{
printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
}*/
//if (tz == 250 && tx == 111 && (ty <= 440 && ty >= 420))
if (i1 < i2)
break;
//if (tz == 250 && ty == 33 && tx <= 512 && test == 1)
/*if (tz == 280 && tx == 280 && ty < 128 && ty >= 96 && test == 1)
{
printf("ptr %d %d %d %f %f %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", tx, ty, tz , i1, i2, i, lastY, stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz);
}*/
lastY = stackY_2; s2 = s1; stackY_2 = stackY_1;
if (stackY_2 != INFINITY)
s1 = stack[TOID(tx, stackY_2, tz, size)];
}
DECODE(p, nx, ny, nz);
s1 = s2; s2 = ENCODE(nx, lastY, nz);
stackY_2 = lastY; lastY = ty;
stack[TOID(tx, ty, tz, size)] = s2;
/*if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
{
DECODE(s2, nx, ny, nz);
//if (ny == 330 && nz == 291 && test == 1)
printf("ptr2 %d %d %d %d %d %d %d\n", tx, ty, tz , nx, ny, nz , s2);
}*/
}
}
if (p == PBAMARKER)
stack[TOID(tx, ty-1, tz, size)] = ENCODE(INFINITY, lastY, INFINITY);
}
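// PBA phase 2: merges pairs of adjacent y-bands. Sites of the second band are pushed onto the first
// band's stack, dominated entries are popped with the interpointY test, and the stack/forward
// arrays are re-linked, including the merged band's head and tail records.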
__global__ void PBADistanceField_kernelMergeBands(int *stack, int *forward, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band1 = (blockIdx.x / mod) * 2;
int band2 = band1 + 1;
int tx = blkX * blockDim.x + threadIdx.x;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int firstY, lastY, next, p, id;
int3 stack_1, stack_2, current;
float i1, i2;
firstY = band2 * bandSize;
lastY = firstY - 1;
/*if ( tx == 431 && tz == 250)
{
int nx, ny, nz;
p = tex1Dfetch(pbaTexLinks, TOID(431, 97, 250, size));
DECODE(p, nx, ny, nz);
// //if (ny == 330 && nz == 291 && test == 1)
printf("ptr %d %d %d %d %d %d \n", tx, bandSize, tz , nx, ny, nz );
}*/
// Band 1, get the two last items
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
/*if ( tx == 431 && tz == 250 && bandSize == 64 && band2 == 1)
{
printf("ptr111 %d %d %d %d \n", lastY, stack_2.x, stack_2.y, stack_2.z );
}*/
if (stack_2.x == INFINITY) { // Not a site
lastY = stack_2.y;
if (lastY != INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
}
}
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
}
// Band 2, get the first item
next = tex1Dfetch(pbaTexPointer, TOID(tx, firstY, tz, size));
if (next < 0) // Not a site
firstY = -next;
if (firstY != INFINITY) {
id = TOID(tx, firstY, tz, size);
p = tex1Dfetch(pbaTexLinks, id);
DECODE(p, current.x, current.y, current.z);
}
/*if ( tx == 431 && tz == 250 && bandSize == 64 && band2 == 1)
{
printf("ptr222 %d %d %d %d %d %d %d %d %d %d %d\n", firstY, band2, stack_1.x, stack_1.y, stack_1.z, stack_2.x, stack_2.y, stack_2.z, current.x, current.y, current.z );
}*/
int top = 0;
int count = 0; //Deb
while (top < 2 && firstY != INFINITY) {
while (stack_2.y != INFINITY) {
i1 = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
i2 = interpointY(stack_2.x, lastY, stack_2.z, current.x, firstY, current.z, tx, tz);
//if (tx == 503 && tz == 70)
// printf("-- %d %d \n", lastY, stack_2.y);
if (i1 < i2)
break;
//if (bandSize == 128 && tz == 311 && tx == 256 )// && firstY < 70 )
//if (bandSize == 128 && tz == 250 && tx == 431)
count++;
lastY = stack_2.y; stack_2 = stack_1;
top--;
if (stack_2.y != INFINITY) {
p = stack[TOID(tx, stack_2.y, tz, size)];
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
}
}
// Update pointers to link the current node to the stack
stack[id] = ENCODE(current.x, lastY, current.z);
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
//{
//int3 test;
//DECODE(stack[TOID(tx, 97, tz, size)], test.x, test.y, test.z);
//printf("stack %d %d %d %d %d %d %d \n", bandSize, test.x, test.y, test.z, current.x, lastY, current.z );
// printf("stack %d %d %d %d %d %d %d \n", bandSize, id%size, (id/(size))%size, id/(size*size), current.x, lastY, current.z );
//}
if (lastY != INFINITY)
{
forward[TOID(tx, lastY, tz, size)] = firstY;
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
// printf("forward %d %d %d \n", bandSize, lastY, firstY );
}
top = max(1, top + 1);
// Advance the current pointer forward
stack_1 = stack_2; stack_2 = make_int3(current.x, lastY, current.z); lastY = firstY;
firstY = tex1Dfetch(pbaTexPointer, id);
if (firstY != INFINITY) {
id = TOID(tx, firstY, tz, size);
p = tex1Dfetch(pbaTexLinks, id);
DECODE(p, current.x, current.y, current.z);
}
}
//if (count >= 39)
//printf("test %d %d %d %d\n", tx, tz, count, bandSize);
// Update the head pointer
firstY = band1 * bandSize;
lastY = band2 * bandSize;
if (tex1Dfetch(pbaTexPointer, TOID(tx, firstY, tz, size)) == -INFINITY)
forward[TOID(tx, firstY, tz, size)] = -abs(tex1Dfetch(pbaTexPointer, TOID(tx, lastY, tz, size)));
// Update the tail pointer
firstY = band1 * bandSize + bandSize - 1;
lastY = band2 * bandSize + bandSize - 1;
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, current.x, current.y, current.z);
if (current.x == INFINITY && current.y == INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, firstY, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
if (stack_1.x == INFINITY)
current.y = stack_1.y;
else
current.y = firstY;
stack[TOID(tx, lastY, tz, size)] = ENCODE(current.x, current.y, current.z);
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
//{
// printf("-stack %d %d %d %d %d \n", bandSize, lastY, current.x, current.y, current.z );
//}
}
/* if (tz == 250 && tx == 431 && bandSize == 256)
{
int nx, ny, nz;
for(int a = 0; a < 512 ; a++)
{
DECODE(stack[TOID(tx, a, tz, size)], nx, ny, nz);
printf("%d %d %d %d \n", a, nx, ny, nz);
}
}*/
}
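// PBA phase 2: walks each band from its last row to its first and converts the backward stack links
// into forward pointers (each proximate site stores the y of the next one), recording the negated y
// of the band's head site at the band's first pixel.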
__global__ void PBADistanceField_kernelCreateForwardPointers(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = (band+1) * bandSize - 1;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int lasty = INFINITY, nexty;
int current, id;
// Get the tail pointer
current = tex1Dfetch(pbaTexLinks, TOID(tx, ty, tz, size));
if (GET_X(current) == INFINITY)
nexty = GET_Y(current);
else
nexty = ty;
id = TOID(tx, ty, tz, size);
for (int i = 0; i < bandSize; i++, ty--, id -= size)
if (ty == nexty) {
output[id] = lasty;
nexty = GET_Y(tex1Dfetch(pbaTexLinks, id));
lasty = ty;
}
// Store the pointer to the head at the first pixel of this band
if (lasty != ty + 1)
output[id + size] = -lasty;
}
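// PBA phase 3: per (x,z) column, sweeps y from size-1 down to 0, popping stack entries once the row
// passes their bisector and writing the encoded closest site for every voxel; shared memory carries
// the stack state between the strided rows handled by one thread block.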
__global__ void PBADistanceField_kernelColorAxis(int *output, int size)
{
__shared__ int3 s_Stack1[BLOCKX], s_Stack2[BLOCKX];
__shared__ int s_lastY[BLOCKX];
__shared__ float s_ii[BLOCKX];
int col = threadIdx.x;
int tid = threadIdx.y;
int tx = blockIdx.x * blockDim.x + col;
int tz = blockIdx.y;
int3 stack_1, stack_2;
int p, lastY;
float ii;
if (tid == blockDim.y - 1) {
lastY = size - 1;
p = tex1Dfetch(pbaTexColor, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
if (stack_2.x == INFINITY) { // Not a site
lastY = stack_2.y;
if (lastY != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
}
}
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
ii = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
}
//if (tz == 250 && tx == 431)
//{
//printf("~~~~%f %d %f \n", s_ii[col], col, ii);
// printf("~~~ %d %d %d %d %d %d %d %d %d \n", blockDim.y - 1, stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
//}
s_Stack1[col] = stack_1; s_Stack2[col] = stack_2; s_lastY[col] = lastY; s_ii[col] = ii;
}
__syncthreads();
for (int ty = size - 1 - tid; ty >= 0; ty -= blockDim.y) {
stack_1 = s_Stack1[col]; stack_2 = s_Stack2[col]; lastY = s_lastY[col]; ii = s_ii[col];
/**/
//if (tz == 250 && tx == 431)
// printf("@@@ %d %d %d %d %d %d \n",tx, ty, tz, stack_2.x, lastY, stack_2.z);
while (stack_2.y != INFINITY) {
if (ty > ii)
break;
/*if (tz == 250 && tx == 431 )
{
printf("------ %d %f %d\n", ty, ii, stack_2.y);
}*/
lastY = stack_2.y; stack_2 = stack_1;
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
ii = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
}
}
__syncthreads();
/*if (tz == 250 && tx == 431 )
{
printf("Encode %d %d %d %d \n", ty, stack_2.x, lastY, stack_2.z);
}*/
output[TOID(tx, ty, tz, size)] = ENCODE(stack_2.x, lastY, stack_2.z);
if (tid == blockDim.y - 1) {
s_Stack1[col] = stack_1; s_Stack2[col] = stack_2; s_lastY[col] = lastY; s_ii[col] = ii;
}
__syncthreads();
}
//if (tz == 280 && tx == 280)
//{
// int nx, ny, nz;
// for(int a = 0; a < 512 ; a++)
// {
// p = output[TOID(tx, a, tz, size)];//tex1Dfetch(pbaTexColor, TOID(431, a, 250, size));
// DECODE(p, nx,ny, nz);
// printf("%d %d %d %d \n",a, nx,ny, nz);
// }
//}
}
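// Transposes the volume in the XY plane tile-by-tile through shared memory and swaps the packed
// x/y fields of every record with ROTATEXY, so the y-sweep kernels can be reused for the x sweep.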
__global__ void PBADistanceField_kernelTransposeXY(int *data, int log2Width, int mask)
{
__shared__ int block1[BLOCKXY][BLOCKXY + 1];
__shared__ int block2[BLOCKXY][BLOCKXY + 1];
int blkX = blockIdx.y;
int blkY = blockIdx.x >> log2Width;
int blkZ = blockIdx.x & mask;
if (blkX > blkY)
return ;
int x, y, z, id1, id2;
int pixel;
blkX = __mul24(blkX, BLOCKXY);
blkY = __mul24(blkY, BLOCKXY);
z = blkZ << log2Width;
// read the cube into shared memory
x = blkX + threadIdx.x;
y = blkY + threadIdx.y;
id1 = ((z + y) << log2Width) + x;
block1[threadIdx.y][threadIdx.x] = data[id1];
x = blkY + threadIdx.x;
y = blkX + threadIdx.y;
id2 = ((z + y) << log2Width) + x;
block2[threadIdx.y][threadIdx.x] = data[id2];
__syncthreads();
// write the rotated cube to global memory
pixel = block1[threadIdx.x][threadIdx.y];
data[id2] = ROTATEXY(pixel);
pixel = block2[threadIdx.x][threadIdx.y];
data[id1] = ROTATEXY(pixel);
}
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
__constant__ unsigned int MultiplyDeBruijnBitPosition[] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
__constant__ unsigned char BitReverseTable256[] =
{
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
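// Branch-free SWAR population count of a 32-bit word.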
__device__ inline unsigned int bitCount(unsigned int i)
{
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return ((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) >> 24;
}
__device__ inline unsigned int Reversebit(unsigned int v)
{
unsigned int r;
r = (BitReverseTable256[ v & 0xff] << 24) |
(BitReverseTable256[(v >> 8) & 0xff] << 16) |
(BitReverseTable256[(v >> 16) & 0xff] << 8) |
(BitReverseTable256[(v >> 24) & 0xff]);
return r;
}
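// Index of the least-significant set bit, via the De Bruijn multiplication trick and the
// MultiplyDeBruijnBitPosition table above.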
__device__ inline unsigned int GetFirstBitPos(unsigned int source)
{
return (MultiplyDeBruijnBitPosition[((unsigned int)((source & -source) * 0x077CB531U)) >> 27]);
}
__device__ inline unsigned int GetLastBitPos(unsigned int source)
{
unsigned int r = Reversebit(source);
return (31-(MultiplyDeBruijnBitPosition[((unsigned int)((r & -r) * 0x077CB531U)) >> 27]));
}
__device__ inline unsigned int SetBitPos(unsigned int pos)
{
return (1 << pos);
}
__device__ inline bool GetBitPos(unsigned int pos, unsigned int source)
{
return (source & (1 << pos));
}
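// Returns the y coordinate on the column (x0, *, z0) that is equidistant from site 1 and site 2,
// i.e. where the column crosses their perpendicular bisector plane; this is the dominance test used
// by the Maurer and band-merge kernels. The middlepointY/middlepointX helpers below compute the
// analogous integer crossover index along a single ray.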
__device__ inline float interpointY(int x1, int y1, int z1, int x2, int y2, int z2, int x0, int z0)
{
float xM = (x1 + x2) / 2.0f;
float yM = (y1 + y2) / 2.0f;
float zM = (z1 + z2) / 2.0f;
float nx = x2 - x1;
float ny = y2 - y1;
float nz = z2 - z1;
return yM + (nx * (xM - x0) + nz * (zM - z0)) / ny;
}
__device__ inline int middlepointY(unsigned int site1, unsigned int site2, int z0)
{
int dy22 = (GET_PTR(site2)-z0)*(GET_PTR(site2)-z0);
int dy12 = (GET_PTR(site1)-z0)*(GET_PTR(site1)-z0);
int d1 = GET_STACK(site1);
int d2 = GET_STACK(site2);
return int(0.5 * ((dy22-dy12)/(float)(d2-d1) + d1+d2))+1;
}
__device__ inline int middlepointX(unsigned int site1, unsigned int site2, int y0, int z0)
{
int xPlusx = GET_X(site1) + GET_X(site2);
int xMinusx = GET_X(site1) - GET_X(site2);
int yPlusy = GET_Y(site1) + GET_Y(site2);
int yMinusy = GET_Y(site1) - GET_Y(site2);
int zPlusz = GET_Z(site1) + GET_Z(site2);
int zMinusz = GET_Z(site1) - GET_Z(site2);
return int(0.5 * ((zMinusz*(zPlusz-2.0*z0)+yMinusy*(yPlusy-2.0*y0))/(float)xMinusx + xPlusx))+1;
}
|
c9a19d7ffda8aad68e31e2acb06c3d5b8e06d949.cu
|
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <sys/stat.h>
#include "../common/GL/glew.h"
//#include <GL/glew.h>
//#include <GL/glaux.h>
#include "cuda.h"
#include "cutil.h"
#include "cuda_gl_interop.h"
#include "..\GLKLib\GLK.h"
#include "PMBody.h"
#include "LDNIcpuSolid.h"
#include "LDNIcudaSolid.h"
#include "LDNIcudaOperation.h"
#include <thrust/version.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
//--------------------------------------------------------------------------------------------
extern __global__ void LDNIDistanceField_CountBitInInteger(unsigned int *d_index, int nodeNum, int res);
extern __global__ void LDNIDistanceField_CountBitInArray(unsigned int *d_index, unsigned int *m_3dArray, int nodeNum, int res);
extern __global__ void LDNIDistanceField__writeTexToVBO(float3 *d_output, int res, int* table_index, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__writeTexToArray(unsigned short *d_output, int res, unsigned int *table_index, unsigned int* temp_index, int nodeNum);
extern __global__ void LDNIDistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* table_index, unsigned int *m_3dArray, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__Sort2DArray(unsigned short *d_output, unsigned int *d_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *site_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__FilterProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *site_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__FilterProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInX(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInY(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInXLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID);
extern __global__ void LDNIDistanceField__GenerateProbablySiteInYLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID);
extern __global__ void LDNIDistanceField__CountProbablySiteInY(unsigned int *bitDeleted, unsigned int *counter, int res, int nodeNum);
extern __global__ void LDNIDistanceField__SortProbablySite(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__SortProbablySite2(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum);
extern __global__ void LDNIDistanceField__GetSiteByDist(ushort3 *sites, unsigned int *counter, unsigned int *sites_index, unsigned int *sites_off, int offdist, int res, int nodeNum);
extern __global__ void LDNIDistanceField__writeSitesToVBO(float3 *d_output, int res, unsigned int *counter, unsigned int* d_input, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__Test(float3 *d_output, int res, unsigned int *counter, ushort2 *site, unsigned int* site_index, float width, float3 origin, int nodeNum);
extern __global__ void LDNIDistanceField__GetProbablySiteInY(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned short *sites_x, unsigned int *sites_index_x, int3 res, int nodeNum);
extern __global__ void LDNIDistanceField__GetProbablySiteInX(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned int *sites_in, unsigned int *sites_index_in, int3 res, int nodeNum);
extern __global__ void LDNIDistanceField__MaurerAxisInY(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__MaurerAxisInX(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_2(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_4(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_8(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_16(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsY_32(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_2(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_4(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_8(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_16(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__kernelMergeBandsX_32(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum);
extern __global__ void LDNIDistanceField__countArrayToVBO(int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, int nodeNum);
extern __global__ void LDNIDistanceField__writeResultToVBO(float3 *d_output, int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, float width, float3 origin, int nodeNum);
//-----------------------------PBA Distance Field---------------------------------------------------------------
extern __global__ void PBADistanceField__writeTexToArray(int *d_output, int res, int nodeNum, unsigned int* counter);
extern __global__ void PBADistanceField_kernelFloodZ(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelPropagateInterband(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelUpdateVertical(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelMaurerAxis(int *stack, int size, int mod, int bandSize, int test);
extern __global__ void PBADistanceField_kernelMergeBands(int *stack, int *forward, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelCreateForwardPointers(int *output, int size, int mod, int bandSize);
extern __global__ void PBADistanceField_kernelColorAxis(int *output, int size);
extern __global__ void PBADistanceField_kernelTransposeXY(int *data, int log2Width, int mask);
extern __global__ void PBADistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* counter, int *outputDF, int offdist, float width, float3 origin, int nodeNum);
extern __global__ void PBADistanceField__countArrayToVBO(int res, unsigned int* counter, int *outputDF, int offdist, int nodeNum);
extern __global__ void PBADistanceField__writeCompactArray(int *d_output, int *d_input, unsigned int *counter, int nodeNum);
//--------------------------------------------------------------------------------------------
extern __device__ unsigned int bitCount(unsigned int i);
extern __device__ unsigned int GetFirstBitPos(unsigned int source);
extern __device__ unsigned int GetLastBitPos(unsigned int source);
extern __device__ unsigned int SetBitPos(unsigned int pos);
extern __device__ float interpointY(int x1, int y1, int z1, int x2, int y2, int z2, int x0, int z0);
extern __device__ bool GetBitPos(unsigned int pos, unsigned int source);
extern __device__ unsigned int Reversebit(unsigned int v);
extern __device__ int middlepointY(unsigned int site1, unsigned int site2, int z0);
extern __device__ int middlepointX(unsigned int site1, unsigned int site2, int y0, int z0);
//texture<unsigned int> site_tex;
texture<uint4,3> site_tex;
#define BANDWIDTH 32
#define MAX_INT 201326592
#define PBAMARKER -1
#define INFINITY 0x3ff
#define TOID(x, y, z, w) (__mul24(__mul24(z, w) + (y), w) + (x))
#define TOID_CPU(x, y, z, w) ((z) * (w) * (w) + (y) * (w) + (x))
#define ENCODE(x, y, z) (((x) << 20) | ((y) << 10) | (z))
#define DECODE(value, x, y, z) \
x = (value) >> 20; \
y = ((value) >> 10) & 0x3ff; \
z = (value) & 0x3ff
#define GET_X(value) ((value) >> 20)
#define GET_Y(value) (((value) >> 10) & 0x3ff)
#define GET_Z(value) (((value) == PBAMARKER) ? MAX_INT : ((value) & 0x3ff))
#define ROTATEXY(x) ((((x) & 0xffc00) << 10) | \
(((x) & 0x3ff00000) >> 10) | \
((x) & 0x3ff))
#define BLOCKX 32
#define BLOCKY 4
#define BLOCKXY 16
#define GET_STACK(value) ((value >> 16) & 0xffff)
#define GET_PTR(value) ((value) & 0xffff)
#define ENCODE_STACK(a, b) (((a) << 16) | (b & 0xffff))
#define ENCODE_STACK_3(a, b, c) (((a) << 20) | ((b) << 10) | (c & 0x3ff))
#define ENCODE_PTR(value, b) ((value & 0xffff0000) | (b & 0xffff))
#define ENCODE_Z(value, z) ((value & 0xfffffC00) | (z & 0x3ff))
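// Illustration of the packed-site format used by the PBA kernels: ENCODE packs three 10-bit
// coordinates into one int, so at most 1024 voxels per axis are representable (the host code below
// further limits res to 512). For example, ENCODE(3, 5, 7) == (3 << 20) | (5 << 10) | 7
// == 0x00301407, and DECODE recovers x = 3, y = 5, z = 7. Note that INFINITY above is redefined to
// the 10-bit sentinel 0x3ff (shadowing the standard math macro) and PBAMARKER (-1) marks an empty voxel.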
texture<int> pbaTexColor;
texture<int> pbaTexLinks;
//texture<short> pbaTexPointer;
texture<int> pbaTexPointer;
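// Host driver for the PBA (Parallel Banding Algorithm) distance field: generates the site volume,
// floods along z (phase 1), runs the Maurer sweep and coloring along y (phases 2-3), transposes XY
// and repeats the y passes for x, then counts and writes the voxels at the requested offset
// distance into an OpenGL VBO via CUDA-GL interop.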
void LDNIcudaOperation::PBADistanceFieldGeneration(QuadTrglMesh *mesh, GLuint *vbo, unsigned int &vbosize, int res, int offdist, float boundingBox[])
{
if (res > 512) return;
int fboSize = res;
int nVertices;
int phase1Band = 16;
int phase2Band = 16;
int phase3Band = 2;
int **pbaTextures;
int pbaMemSize;
int pbaCurrentBuffer;
int pbaTexSize;
pbaTextures = (int **) malloc(2 * sizeof(int *));
pbaTexSize = fboSize;
pbaMemSize = pbaTexSize * pbaTexSize * pbaTexSize * sizeof(int);
CUDA_SAFE_CALL(cudaMalloc((void **) &pbaTextures[0], pbaMemSize));
CUDA_SAFE_CALL(cudaMalloc((void **) &pbaTextures[1], pbaMemSize));
// PBA initialization
if (!PBADistanceField_SitesGeneration(mesh, vbo, nVertices, res, boundingBox, pbaTextures[0]))
return;
pbaCurrentBuffer = 0;
// Read sites to CPU
int *sites;
printf("Start %d \n", nVertices);
unsigned int* counter;
CUDA_SAFE_CALL(cudaMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
CUDA_SAFE_CALL(cudaMalloc((void**) &sites, nVertices*sizeof(int)));
CUDA_SAFE_CALL(cudaMemset( sites, 0, nVertices*sizeof(int)) );
PBADistanceField__writeCompactArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites, pbaTextures[0], counter, res*res*res);
int* cpu_sites = (int*)malloc(nVertices*sizeof(int));
CUDA_SAFE_CALL(cudaMemcpy( cpu_sites, sites, nVertices*sizeof(int),cudaMemcpyDeviceToHost));
printf("End\n");
// Compute the 3D distance field
/************* Compute Z axis *************/
// --> (X, Y, Z)
pbaCurrentBuffer = PBADistanceField_pba3DColorZAxis(pbaTextures, res, phase1Band, pbaCurrentBuffer);
/************* Compute Y axis *************/
// --> (X, Y, Z)
pbaCurrentBuffer = PBADistanceField_pba3DComputeProximatePointsYAxis(pbaTextures, res, phase2Band, pbaCurrentBuffer, 0);
pbaCurrentBuffer = PBADistanceField_pba3DColorYAxis(pbaTextures, res, phase3Band, pbaCurrentBuffer);
// --> (Y, X, Z)
PBADistanceField_pba3DTransposeXY(pbaTextures[pbaCurrentBuffer], res);
cudaThreadSynchronize();
printf("starting X ==================================\n");
/************** Compute X axis *************/
// Compute X axis
pbaCurrentBuffer = PBADistanceField_pba3DComputeProximatePointsYAxis(pbaTextures, res, phase2Band, pbaCurrentBuffer, 1);
pbaCurrentBuffer = PBADistanceField_pba3DColorYAxis(pbaTextures, res, phase3Band, pbaCurrentBuffer);
// --> (Y, X, Z)
PBADistanceField_pba3DTransposeXY(pbaTextures[pbaCurrentBuffer], res);
cudaFree(sites);
cudaFree(pbaTextures[1-pbaCurrentBuffer]);
char inputStr[10];
printf("\Check Error (very slow)? (y/n): ");
scanf("%s",inputStr);
if (inputStr[0]=='y' || inputStr[0]=='Y')
{
PBADistanceField_CompareResult(pbaTextures[pbaCurrentBuffer], res, nVertices, cpu_sites);
}
free(cpu_sites);
// Generate Offset & display
cudaGraphicsResource *resource;
float gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
float width = gWidth*(float)res;
float origin[3];
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
//unsigned int* counter;
//CUDA_SAFE_CALL(cudaMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
PBADistanceField__countArrayToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(res, counter, pbaTextures[pbaCurrentBuffer], offdist, res*res*res);
CUDA_SAFE_CALL(cudaMemcpy( &vbosize, counter, sizeof(unsigned int),cudaMemcpyDeviceToHost));
printf("size ---------- %ld \n", vbosize);
if (vbosize <= 0)
{
printf("Error in PBA Distance Computation !!! \n");
		cudaFree(pbaTextures[pbaCurrentBuffer]);	// the other buffer and the site list were already freed above
cudaFree(counter);
free(pbaTextures);
return;
}
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbosize*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&resource, *vbo, cudaGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
PBADistanceField__writeArrayToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dptr, res, counter, pbaTextures[pbaCurrentBuffer], offdist, width, make_float3(origin[0],origin[1],origin[2]), res*res*res);
CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbosize);
cudaFree(pbaTextures[pbaCurrentBuffer]);
free(pbaTextures);
cudaFree(counter);
}
int LDNIcudaOperation::PBADistanceField_pba3DColorZAxis(int **pbaTextures, int res, int m1, int cbuffer)
{
int pbaCurrentBuffer = cbuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((res / block.x) * m1, res / block.y);
cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
PBADistanceField_kernelFloodZ<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], res, res / block.x, res / m1);
pbaCurrentBuffer = 1 - pbaCurrentBuffer;
if (m1 > 1) {
// Passing information between bands
cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
PBADistanceField_kernelPropagateInterband<<< grid, block >>>(pbaTextures[1 - pbaCurrentBuffer], res, res / block.x, res / m1);
cudaBindTexture(0, pbaTexLinks, pbaTextures[1 - pbaCurrentBuffer]);
PBADistanceField_kernelUpdateVertical<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], res, res / block.x, res / m1);
}
return pbaCurrentBuffer;
}
int LDNIcudaOperation::PBADistanceField_pba3DComputeProximatePointsYAxis(int **pbaTextures, int res, int m2, int cbuffer, int test)
{
int pbaCurrentBuffer = cbuffer;
int iStack = 1 - pbaCurrentBuffer;
int iForward = pbaCurrentBuffer;
dim3 block = dim3(BLOCKX, BLOCKY);
dim3 grid = dim3((res / block.x) * m2, res / block.y);
//printf("forward %d %d \n",iStack, iForward);
// Compute proximate points locally in each band
cudaBindTexture(0, pbaTexColor, pbaTextures[pbaCurrentBuffer]);
PBADistanceField_kernelMaurerAxis<<< grid, block >>>(pbaTextures[iStack], res, res / block.x, res / m2, test);
//cudaThreadSynchronize();
// Construct forward pointers
cudaBindTexture(0, pbaTexLinks, pbaTextures[iStack]);
PBADistanceField_kernelCreateForwardPointers<<< grid, block >>>(pbaTextures[iForward], res, res / block.x, res / m2);
//
cudaBindTexture(0, pbaTexPointer, pbaTextures[iForward]);
// Repeatly merging two bands into one
for (int noBand = m2; noBand > 1; noBand /= 2) {
grid = dim3((res / block.x) * (noBand / 2), res / block.y);
PBADistanceField_kernelMergeBands<<< grid, block >>>(pbaTextures[iStack],
pbaTextures[iForward], res, res / block.x, res / noBand);
//printf("test %d %d %d %d\n", iForward, iStack, m2);
//break;
}
cudaUnbindTexture(pbaTexLinks);
cudaUnbindTexture(pbaTexColor);
cudaUnbindTexture(pbaTexPointer);
return pbaCurrentBuffer;
}
int LDNIcudaOperation::PBADistanceField_pba3DColorYAxis(int **pbaTextures, int res, int m3, int cbuffer)
{
int pbaCurrentBuffer = cbuffer;
dim3 block = dim3(BLOCKX, m3);
dim3 grid = dim3(res / block.x, res);
cudaBindTexture(0, pbaTexColor, pbaTextures[1 - pbaCurrentBuffer]);
PBADistanceField_kernelColorAxis<<< grid, block >>>(pbaTextures[pbaCurrentBuffer], res);
cudaUnbindTexture(pbaTexColor);
return pbaCurrentBuffer;
}
void LDNIcudaOperation::PBADistanceField_pba3DTransposeXY(int *&inputDF, int res)
{
dim3 block(BLOCKXY, BLOCKXY);
dim3 grid((res / BLOCKXY) * res, res / BLOCKXY);
int log2Width;
int tmp = res;
log2Width = 0;
while (tmp > 1) { tmp /= 2; log2Width++; }
PBADistanceField_kernelTransposeXY<<< grid, block >>>(inputDF, log2Width, res - 1);
}
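// Host driver for the LDNI-based distance field: builds the per-ray site lists, runs the banded
// Maurer sweep in y (LDNIDistanceField__MaurerAxisInY + kernelMergeBandsY_*), compacts the
// surviving sites with a Thrust exclusive scan, repeats the sweep in x, and finally counts and
// writes the offset-surface points into an OpenGL VBO.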
void LDNIcudaOperation::DistanceFieldGeneration(QuadTrglMesh *mesh, GLuint *vbo, unsigned int &vbosize, int res, int offdist, float boundingBox[])
{
int arrsize = res*res;
unsigned int* sites_index;
unsigned short *sites;
int siteNum;
LDNIDistanceField_SitesGeneration(mesh, vbo, siteNum, res, boundingBox, sites_index, sites);
if (siteNum <= 0)
{
cudaFree(sites);
cudaFree(sites_index);
return ;
}
//check whether the sites on each ray are sorted (Just for in case, should be sorted during the writing kernel)
//LDNIDistanceField__Sort2DArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites, sites_index, res, res*res);
//LDNIDistanceField__GenerateProbablySiteInYByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist, res*res*res);
//LDNIDistanceField__GenerateProbablySiteInXByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist, res*res*res);
//LDNIDistanceField__FilterProbablySiteInYByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist-1, res*res*res);
//LDNIDistanceField__FilterProbablySiteInXByGivenDistance<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitSites, sites, sites_index, res, offdist-1, res*res*res);
long time = clock();
unsigned int* bitDeleted;
int bitsize = res*res*(res/32);
CUDA_SAFE_CALL(cudaMalloc((void**) & bitDeleted, bitsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( bitDeleted, 0, bitsize*sizeof(unsigned int)) );
LDNIDistanceField__MaurerAxisInY<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/BANDWIDTH, res), offdist, res*(res/BANDWIDTH)*res);
cudaThreadSynchronize();
if (res > 32)
{
LDNIDistanceField__kernelMergeBandsY_2<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/2, res), offdist, 2, 16, res*(res/2)*res);
//printf("Y-32\n");
}
if (res > 64)
{
LDNIDistanceField__kernelMergeBandsY_4<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/4, res), offdist, 4, 8, res*(res/4)*res);
//printf("Y-64\n");
}
if (res > 128){
LDNIDistanceField__kernelMergeBandsY_8<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/8, res), offdist, 8, 4, res*(res/8)*res);
//printf("Y-128\n");
}
if (res > 256)
{
LDNIDistanceField__kernelMergeBandsY_16<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/16, res), offdist, 16, 2, res*(res/16)*res);
//printf("Y-256\n");
}
if (res > 512)
{
LDNIDistanceField__kernelMergeBandsY_32<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites, sites_index, make_int3(res, res/32, res), offdist, 32, 1, res*(res/32)*res);
//printf("Y-512\n");
}
cudaThreadSynchronize();
printf("time 1 : %ld(ms) \n", clock()-time); time = clock();
unsigned int* sites_index_y;
unsigned int* numofBit = (unsigned int*)malloc(sizeof(unsigned int));
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_index_y, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_index_y, 0, (arrsize+1)*sizeof(unsigned int)));
LDNIDistanceField__CountProbablySiteInY<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_index_y, res, res*res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index_y); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
numofBit[0]=dev_ptr[arrsize];
cudaThreadSynchronize();
printf("time 2 : %ld(ms) \n", clock()-time); time = clock();
printf("Get Sites in Y : %d \n", numofBit[0]);
unsigned int* sites_y;
unsigned int* temp2D;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_y, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_y, 0, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMalloc((void**) &temp2D, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( temp2D, 0, (arrsize+1)*sizeof(unsigned int)));
LDNIDistanceField__GetProbablySiteInY<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, temp2D, sites_y, sites_index_y, sites, sites_index, make_int3(res, res/BANDWIDTH, res), res*(res/BANDWIDTH)*res);
cudaFree(temp2D);
cudaFree(sites_index);
cudaFree(sites);
CUDA_SAFE_CALL(cudaMemset( bitDeleted, 0, bitsize*sizeof(unsigned int)) );
//LDNIDistanceField__SortProbablySite<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_y, sites_index_y, res, res*res);
printf("time 3 : %ld(ms) \n", clock()-time); time = clock();
/* //for debugging
thrust::device_ptr<unsigned int> dev_ptr2(temp2D); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr2, dev_ptr2+(arrsize+1), dev_ptr2); // in-place scan
numofBit[0]=dev_ptr2[arrsize];
printf("Proved Sites in Y : %d \n", numofBit[0]);*/
//-------------------------------X direction---------------------------------------//
LDNIDistanceField__MaurerAxisInX<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/BANDWIDTH, res), offdist, res*(res/BANDWIDTH)*res);
cudaThreadSynchronize();
printf("time 4 : %ld(ms) \n", clock()-time); time = clock();
if (res > 32)
LDNIDistanceField__kernelMergeBandsX_2<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/2, res), offdist, 2, 16, res*(res/2)*res);
if (res > 64)
LDNIDistanceField__kernelMergeBandsX_4<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/4, res), offdist, 4, 8, res*(res/4)*res);
if (res > 128)
LDNIDistanceField__kernelMergeBandsX_8<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/8, res), offdist, 8, 4, res*(res/8)*res);
if (res > 256)
LDNIDistanceField__kernelMergeBandsX_16<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/16, res), offdist, 16, 2, res*(res/16)*res);
if (res > 512)
LDNIDistanceField__kernelMergeBandsX_32<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_y, sites_index_y, make_int3(res, res/32, res), offdist, 32, 1, res*(res/32)*res);
cudaThreadSynchronize();
printf("time 5 : %ld(ms) \n", clock()-time); time = clock();
unsigned int* sites_index_x;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_index_x, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_index_x, 0, (arrsize+1)*sizeof(unsigned int)));
LDNIDistanceField__CountProbablySiteInY<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, sites_index_x, res, res*res);
thrust::device_ptr<unsigned int> dev_ptr2(sites_index_x); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr2, dev_ptr2+(arrsize+1), dev_ptr2); // in-place scan
numofBit[0]=dev_ptr2[arrsize];
cudaThreadSynchronize();
printf("time 6 : %ld(ms) \n", clock()-time); time = clock();
printf("Get Sites in X : %d \n", numofBit[0]);
unsigned int* sites_x;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_x, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_x, 0, numofBit[0]*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMalloc((void**) &temp2D, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( temp2D, 0, (arrsize+1)*sizeof(unsigned int)));
LDNIDistanceField__GetProbablySiteInX<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(bitDeleted, temp2D, sites_x, sites_index_x, sites_y, sites_index_y, make_int3(res, res/BANDWIDTH, res), res*(res/BANDWIDTH)*res);
//for debugging
/*thrust::device_ptr<unsigned int> dev_ptr3(temp2D); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr3, dev_ptr3+(arrsize+1), dev_ptr3); // in-place scan
numofBit[0]=dev_ptr3[arrsize];
printf("Proved Sites in Y : %d \n", numofBit[0]);*/
cudaThreadSynchronize();
printf("time 7 : %ld(ms) \n", clock()-time); time = clock();
cudaFree(temp2D);
cudaFree(sites_index_y);
cudaFree(sites_y);
cudaFree(bitDeleted);
//LDNIDistanceField__SortProbablySite2<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_x, sites_index_x, res, res*res);
//-------------------------------Get Sites for Rendering---------------------------------------//
//Display
cudaGraphicsResource *resource;
float gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
float width = gWidth*(float)res;
float origin[3];
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
unsigned int* counter;
CUDA_SAFE_CALL(cudaMalloc((void**) &counter, sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)));
LDNIDistanceField__countArrayToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(make_int3(res, res, res), counter, sites_x, sites_index_x, offdist, res*res*res);
CUDA_SAFE_CALL(cudaMemcpy( numofBit, counter, sizeof(unsigned int),cudaMemcpyDeviceToHost));
//vbosize = LDNIDistanceField_ReadArrayToVBO(rr, vbo, bitDeleted, res, width, origin);
//-----------------------------------------------------------------------------------//
printf("Final Site %d \n", numofBit[0]);
if (numofBit[0] <= 0)
{
cudaFree(sites_index_x);
cudaFree(sites_x);
cudaFree(counter);
return;
}
vbosize = numofBit[0];
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, numofBit[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&resource, *vbo, cudaGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
LDNIDistanceField__writeResultToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dptr, make_int3(res, res, res), counter, sites_x, sites_index_x, offdist, width, make_float3(origin[0],origin[1],origin[2]), res*res*res);
CUDA_SAFE_CALL(cudaMemcpy( &vbosize, counter, sizeof(unsigned int),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %ld %ld bytes\n", vbosize, numofBit[0]);
//-----------------------------------------------------------------------------------//
	cudaGraphicsUnregisterResource(resource);
	cudaFree(counter);
	cudaFree(sites_index_x);
	cudaFree(sites_x);
}
int LDNIcudaOperation::LDNIDistanceField_ReadArrayToVBO(cudaGraphicsResource *resource, GLuint *vbo, unsigned int *m_3dArray, int res, float width, float origin[3])
{
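	// Builds a point VBO from a packed 3D occupancy bit-array: a first kernel counts the
	// set bits to size the buffer, a second kernel writes one vertex per set voxel into
	// the mapped buffer.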
unsigned int* countVertex;
CUDA_SAFE_CALL(cudaMalloc((void**) & countVertex,sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( countVertex, 0, sizeof(unsigned int)) );
// Declare Host Variable
int* vbo_size = (int*)malloc(sizeof(int));
//Step 1 : Find out the size of VBO
LDNIDistanceField_CountBitInArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(countVertex, m_3dArray, res*res*(res/32), res);
CUDA_SAFE_CALL(cudaMemcpy( vbo_size, countVertex, sizeof(unsigned int),cudaMemcpyDeviceToHost));
printf("Distance Offset: VBO Size %ld bytes\n", vbo_size[0]);
	if (vbo_size[0] <= 0) {cudaFree(countVertex); free(vbo_size); return 0;}
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbo_size[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&resource, *vbo, cudaGraphicsRegisterFlagsWriteDiscard));
CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(cudaMemset( countVertex, 0, sizeof(int)) );
LDNIDistanceField__writeArrayToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dptr, res, countVertex, m_3dArray, width, make_float3(origin[0],origin[1],origin[2]), res*res*(res/32));
CUDA_SAFE_CALL(cudaMemcpy( vbo_size, countVertex, sizeof(int),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbo_size[0]);
cudaFree(countVertex);
return vbo_size[0];
}
int LDNIcudaOperation::LDNIDistanceField_Read3DTextureToVBO(cudaGraphicsResource *resource, GLuint* vbo, int res, float width, float origin[3])
{
/*int* countVertex;
CUDA_SAFE_CALL(cudaMalloc((void**) & countVertex,sizeof(int)));
CUDA_SAFE_CALL(cudaMemset( countVertex, 0, sizeof(int)) );
// Declare Host Variable
int* vbo_size = (int*)malloc(sizeof(int));
//Step 1 : Find out the size of VBO
LDNIDistanceField_CountBitInInteger<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(countVertex, res*res*(res/128), res);
CUDA_SAFE_CALL(cudaMemcpy( vbo_size, countVertex, sizeof(int),cudaMemcpyDeviceToHost));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbo_size[0]);
if (vbo_size[0] <= 0) return 0;
//Step 2 : Create the VBO
glGenBuffers(1, vbo);
glBindBuffer(GL_ARRAY_BUFFER, *vbo);
glBufferData(GL_ARRAY_BUFFER, vbo_size[0]*3*sizeof(float), 0, GL_STATIC_DRAW);
glBindBuffer(GL_ARRAY_BUFFER, 0);
CUDA_SAFE_CALL(cudaGraphicsGLRegisterBuffer(&resource, *vbo, cudaGraphicsRegisterFlagsWriteDiscard));
//Step 3 : Write VBO
CUDA_SAFE_CALL(cudaGraphicsMapResources(1, &resource, 0));
size_t num_bytes;
float3 *dptr;
CUDA_SAFE_CALL(cudaGraphicsResourceGetMappedPointer((void **)&dptr, &num_bytes, resource));
CUDA_SAFE_CALL(cudaMemset( countVertex, 0, sizeof(int)) );
LDNIDistanceField__writeTexToVBO<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(dptr, res, countVertex, width, make_float3(origin[0],origin[1],origin[2]), res*res*(res/128));
CUDA_SAFE_CALL(cudaMemcpy( vbo_size, countVertex, sizeof(int),cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaGraphicsUnmapResources(1, &resource, 0));
printf("CUDA mapped VBO: VBO Size %ld bytes\n", vbo_size[0]);
cudaFree(countVertex);
return vbo_size[0];*/
return 0;
}
bool LDNIcudaOperation::PBADistanceField_SitesGeneration(QuadTrglMesh *mesh, GLuint *vbo, int &vbosize, int res, float boundingBox[], int *&inputDF)
{
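	// Voxelizes the mesh on the GPU with OpenGL shaders into three axis-aligned 3D
	// textures, composites them into the z-axis texture with a logic-OR pass, and copies
	// the resulting site voxels into the PBA input volume (inputDF).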
const bool bCube=true;
float origin[3],gWidth, width; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
width = gWidth*(float)res;
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
short nAxis;
GLenum buffers[16] = {GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT, GL_COLOR_ATTACHMENT3_EXT
, GL_COLOR_ATTACHMENT4_EXT, GL_COLOR_ATTACHMENT5_EXT, GL_COLOR_ATTACHMENT6_EXT, GL_COLOR_ATTACHMENT7_EXT
, GL_COLOR_ATTACHMENT8_EXT, GL_COLOR_ATTACHMENT9_EXT, GL_COLOR_ATTACHMENT10_EXT, GL_COLOR_ATTACHMENT11_EXT
, GL_COLOR_ATTACHMENT12_EXT, GL_COLOR_ATTACHMENT13_EXT, GL_COLOR_ATTACHMENT14_EXT, GL_COLOR_ATTACHMENT15_EXT};
//-----------------------------------------------------------------------------------------
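	// Step 1: loading, compiling and linking the shaders (vertex, geometry and fragment) for LDNI sampling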
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\newSampleLDNIGShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\voxelLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! 1 \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// texture setting for fragment shader
//memset(fileadd,0,256*sizeof(char));
//strcat(fileadd, "Outdata");
int maxColorBuffers, maxTextureSize;
int layer = res/128;
glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
glGetIntegerv( GL_MAX_3D_TEXTURE_SIZE_EXT, &maxTextureSize );
int z_tile = ceil(layer/(float)maxColorBuffers);
printf("max texture size %d %d\n", maxTextureSize, layer);
char value[10];
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
int tilesize = min(layer, maxColorBuffers)*128;
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
	glGetError();	// clear any GL error generated earlier
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1,id2,id3,id4; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
id2 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id2,res);
id3 = glGetUniformLocationARB(g_programObj,"tilesize");
glUniform1iARB(id3,tilesize);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: Prepare 3D texture for voxelization
GLuint PrimitiveVoxel[3];
glEnable(GL_TEXTURE_3D_EXT);
glGenTextures(1, &PrimitiveVoxel[0]); // x-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[0]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
	//if res <= 2048, the texture can be created directly; otherwise it has to be subdivided
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[1]); // y-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[1]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[2]); // z-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[2]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
//-----------------------------------------------------------------------------------------
// Step 6: Voxelization
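	// Each pass renders the mesh once per axis with GL_COLOR_LOGIC_OP / GL_OR so every
	// rasterized fragment ORs its bit into the layered 3D texture attached to the FBO;
	// when more layers are needed than maxColorBuffers, the work is split into z tiles.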
GLuint fbo;
int buffersize = min(layer, maxColorBuffers);
int tile;
for(tile=0; tile < z_tile; tile++)
{
for(nAxis=0; nAxis < 3; nAxis++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++) glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis], 0, a);
id4 = glGetUniformLocationARB(g_programObj,"tile");
glUniform1iARB(id4,tile);
glDrawBuffers(buffersize,buffers);
glEnable(GL_DEPTH_TEST);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glViewport(0,0,res,res);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-width*0.5,width*0.5,-width*0.5,width*0.5,width*0.5,-width*0.5);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClearColorIuiEXT(0,0,0,0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(dispListIndex);
glFlush();
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glClearColorIuiEXT(0,0,0,0);
glDeleteFramebuffersEXT (1,&fbo);
}
}
glUseProgramObjectARB(0);
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDeleteTextures(1, &vertexTexture);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//-----------------------------------------------------------------------------------------
// Step 7: Build Composite Shader
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Composite Vertex Shader Compile Error\n\n %s ", str); return false;
}
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Composite Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 8: Composite the voxelization result
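	// The x- and y-axis voxelizations are re-sampled by the composite fragment shader
	// (FetchTextureX / FetchTextureY subroutines) and OR-ed into the z-axis texture,
	// which is then exposed to CUDA through the registered graphics resource.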
cudaGraphicsResource *resource;
int t_index = glGetAttribLocation( g_programObj, "in_coord");
CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, cudaGraphicsMapFlagsReadOnly) );
for(tile=0; tile < z_tile; tile++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[2]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++)
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[2], 0, a);
//CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, cudaGraphicsMapFlagsReadOnly) );
glUseProgramObjectARB(g_programObj);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[0]);
glDisable(GL_TEXTURE_3D_EXT);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[1]);
glDisable(GL_TEXTURE_3D_EXT);
GLuint fetchXIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureX");
GLuint fetchYIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureY");
GLint tex0;
tex0 = glGetUniformLocationARB(g_programObj,"Xtex");
glUniform1iARB(tex0,0);
tex0 = glGetUniformLocationARB(g_programObj,"Ytex");
glUniform1iARB(tex0,1);
id0 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id0,res);
glDrawBuffers(min(maxColorBuffers,layer-(tile*maxColorBuffers)),buffers);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LOGIC_OP);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glViewport(0, 0, res, res);
glClearColorIuiEXT(0,0,0,0);
glClear( GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchXIndex);
float l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
//glCallList(dispListIndex);
//
/*float layer = -1.0-(1.0/(res/128));
glBegin(GL_QUADS);
for(int i=1;i<=(res/128);i++)
{
glTexCoord3i(0 , res , i-1); glVertex3f(-1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, res , i-1); glVertex3f( 1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, 0 , i-1); glVertex3f( 1.0f,-1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(0 , 0 , i-1); glVertex3f(-1.0f,-1.0f, layer + i*(2.0/(res/128)));
}
glEnd();
glFlush();*/
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchYIndex);
l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glDisable(GL_COLOR_LOGIC_OP);
glClearColorIuiEXT(0,0,0,0);
}
glBindTexture(GL_TEXTURE_3D_EXT,0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glDeleteFramebuffersEXT (1,&fbo);
glUseProgramObjectARB(0);
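	// Map the composited z-axis texture into CUDA and bind it to the site_tex texture
	// reference so the site-reading kernels can sample the voxelization.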
CUDA_SAFE_CALL( cudaGraphicsMapResources( 1, &resource, NULL ) );
cudaArray *in_array;
CUDA_SAFE_CALL( cudaGraphicsSubResourceGetMappedArray( &in_array, resource, 0, 0));
CUDA_SAFE_CALL( cudaBindTextureToArray(site_tex, in_array) );
CUDA_SAFE_CALL( cudaGraphicsUnmapResources( 1, &resource, NULL ) );
//vbosize = LDNIDistanceField_Read3DTextureToVBO(resource, vbo, res, width, origin);
/*int arrsize = res*res;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_index, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_index, 0, (arrsize+1)*sizeof(unsigned int)) );
LDNIDistanceField_CountBitInInteger<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_index, res*res*(res/128), res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int siteNum=dev_ptr[arrsize];
printf("Number of Sites: ----- %d\n",siteNum);
vbosize = siteNum;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites, siteNum*sizeof(unsigned short)));
CUDA_SAFE_CALL(cudaMemset( sites, 0, siteNum*sizeof(unsigned short)) );
unsigned int *temp2D;
CUDA_SAFE_CALL(cudaMalloc((void**) &temp2D, arrsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( temp2D, 0, arrsize*sizeof(unsigned int)) );
LDNIDistanceField__writeTexToArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites, res, sites_index, temp2D, res*res*(res/128));
cudaFree(temp2D);
cudaFree(counter);*/
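	// Fill the PBA input volume: PBADistanceField__writeTexToArray reads the bound site
	// texture and writes the sites into inputDF, counting them in 'counter' (copied back
	// into vbosize).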
unsigned int *counter;
CUDA_SAFE_CALL(cudaMalloc((void**) &counter,sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
PBADistanceField__writeTexToArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(inputDF, res, res*res*(res/128), counter);
CUDA_SAFE_CALL( cudaMemcpy( &vbosize, counter, sizeof(unsigned int), cudaMemcpyDeviceToHost ) );
cudaGraphicsUnregisterResource(resource);
/**/
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
glDeleteTextures(3, PrimitiveVoxel);
glDisable(GL_TEXTURE_3D_EXT);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
return true;
}
bool LDNIcudaOperation::LDNIDistanceField_SitesGeneration(QuadTrglMesh *mesh, GLuint *vbo, int &vbosize, int res, float boundingBox[], unsigned int *&sites_index, unsigned short *&sites)
{
const bool bCube=true;
float origin[3],gWidth, width; long time=clock(),totalTime=clock();
int i,nodeNum;
char fileadd[256];
if ((boundingBox[0]==boundingBox[1]) && (boundingBox[2]==boundingBox[3]) && (boundingBox[4]==boundingBox[5])) {
mesh->CompBoundingBox(boundingBox);
if (bCube) {
float xx=(boundingBox[0]+boundingBox[1])*0.5f;
float yy=(boundingBox[2]+boundingBox[3])*0.5f;
float zz=(boundingBox[4]+boundingBox[5])*0.5f;
float ww=boundingBox[1]-boundingBox[0];
if ((boundingBox[3]-boundingBox[2])>ww) ww=boundingBox[3]-boundingBox[2];
if ((boundingBox[5]-boundingBox[4])>ww) ww=boundingBox[5]-boundingBox[4];
ww=ww*0.55+ww/(float)(res-1)*2.0;
boundingBox[0]=xx-ww; boundingBox[1]=xx+ww;
boundingBox[2]=yy-ww; boundingBox[3]=yy+ww;
boundingBox[4]=zz-ww; boundingBox[5]=zz+ww;
}
}
gWidth=(boundingBox[1]-boundingBox[0])/(float)res;
width = gWidth*(float)res;
origin[0]=boundingBox[0]+gWidth*0.5f;
origin[1]=boundingBox[2]+gWidth*0.5f;
origin[2]=boundingBox[4]+gWidth*0.5f;
if (glewInit() != GLEW_OK) {printf("glewInit failed. Exiting...\n"); return false;}
if (glewIsSupported("GL_VERSION_2_0")) {printf("\nReady for OpenGL 2.0\n");} else {printf("OpenGL 2.0 not supported\n"); return false;}
//-----------------------------------------------------------------------------------------
int dispListIndex; GLhandleARB g_programObj, g_vertexShader, g_GeometryShader, g_FragShader;
GLuint vertexTexture;
const char *VshaderString[1],*GshaderString[1],*FshaderString[1];
GLint bCompiled = 0, bLinked = 0;
char str[4096] = ""; int xF,yF;
short nAxis;
GLenum buffers[16] = {GL_COLOR_ATTACHMENT0_EXT, GL_COLOR_ATTACHMENT1_EXT, GL_COLOR_ATTACHMENT2_EXT, GL_COLOR_ATTACHMENT3_EXT
, GL_COLOR_ATTACHMENT4_EXT, GL_COLOR_ATTACHMENT5_EXT, GL_COLOR_ATTACHMENT6_EXT, GL_COLOR_ATTACHMENT7_EXT
, GL_COLOR_ATTACHMENT8_EXT, GL_COLOR_ATTACHMENT9_EXT, GL_COLOR_ATTACHMENT10_EXT, GL_COLOR_ATTACHMENT11_EXT
, GL_COLOR_ATTACHMENT12_EXT, GL_COLOR_ATTACHMENT13_EXT, GL_COLOR_ATTACHMENT14_EXT, GL_COLOR_ATTACHMENT15_EXT};
//-----------------------------------------------------------------------------------------
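	// Step 1: loading, compiling and linking the shaders (vertex, geometry and fragment) for LDNI sampling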
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\sampleLDNIVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
unsigned char *ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Vertex Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\newSampleLDNIGShader.geo");
g_GeometryShader = glCreateShaderObjectARB( GL_GEOMETRY_SHADER_EXT );
ShaderAssembly = _readShaderFile( fileadd );
GshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_GeometryShader, 1, GshaderString, NULL );
glCompileShaderARB( g_GeometryShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_GeometryShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_GeometryShader, sizeof(str), NULL, str);
printf("Warning: Geo Shader Compile Error\n\n"); return false;
}
//-----------------------------------------------------------------------------
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\voxelLDNIFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! 1 \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_GeometryShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Geometry Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
//-----------------------------------------------------------------------------
// texture setting for fragment shader
//memset(fileadd,0,256*sizeof(char));
//strcat(fileadd, "Outdata");
int maxColorBuffers, maxTextureSize;
int layer = res/128;
glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
glGetIntegerv( GL_MAX_3D_TEXTURE_SIZE_EXT, &maxTextureSize );
int z_tile = ceil(layer/(float)maxColorBuffers);
printf("max texture size %d %d\n", maxTextureSize, layer);
char value[10];
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
int tilesize = min(layer, maxColorBuffers)*128;
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 2: creating texture for vertex array and binding
long texBindingTime=clock();
	glGetError();	// clear any GL error generated earlier
nodeNum=mesh->GetNodeNumber(); _texCalProduct(nodeNum,xF,yF);
int temp;
for(temp=1;temp<xF;temp *= 2) {}
xF = temp; //if (xF<64) xF=64;
yF = (int)(nodeNum/xF)+1; if (yF<64) yF=64;
printf("Texture Size: xF=%d yF=%d\n",xF,yF);
float* verTex=(float*)malloc(xF*yF*3*sizeof(float));
memset(verTex,0,xF*yF*3*sizeof(float));
memcpy(verTex,mesh->GetNodeArrayPtr(),nodeNum*3*sizeof(float));
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glGenTextures(1, &vertexTexture);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, vertexTexture);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGB32F_ARB, xF, yF, 0, GL_RGB, GL_FLOAT, verTex);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, 0);
free(verTex);
if (glGetError()!=GL_NO_ERROR) printf("Error: GL_TEXTURE_RECTANGLE_ARB texture binding!\n\n");
texBindingTime=clock()-texBindingTime;
printf("\nTime for binding texture onto the graphics memory - %ld (ms)\n\n",texBindingTime);
//-----------------------------------------------------------------------------------------
// Step 3: building GL-list for activating the geometry shader
unsigned int ver[4];
int faceNum=mesh->GetFaceNumber();
dispListIndex = glGenLists(1);
glNewList(dispListIndex, GL_COMPILE);
glBegin(GL_POINTS);
for(i=0;i<faceNum;i++) {
mesh->GetFaceNodes(i+1,ver[0],ver[1],ver[2],ver[3]);
glVertex3i(ver[0]-1,ver[1]-1,ver[2]-1);
if (mesh->IsQuadFace(i+1)) {glVertex3i(ver[0]-1,ver[2]-1,ver[3]-1);} // one more triangle
}
glEnd();
glEndList();
//-----------------------------------------------------------------------------------------
// Step 4: using program objects and the texture
GLint id0,id1,id2,id3,id4; float centerPos[3];
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB,vertexTexture);
glUseProgramObjectARB(g_programObj);
id0 = glGetUniformLocationARB(g_programObj,"sizeNx");
glUniform1iARB(id0,xF);
centerPos[0]=(boundingBox[0]+boundingBox[1])*0.5f;
centerPos[1]=(boundingBox[2]+boundingBox[3])*0.5f;
centerPos[2]=(boundingBox[4]+boundingBox[5])*0.5f;
id1 = glGetUniformLocationARB(g_programObj,"Cent");
glUniform3fARB(id1,centerPos[0],centerPos[1],centerPos[2]);
id2 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id2,res);
id3 = glGetUniformLocationARB(g_programObj,"tilesize");
glUniform1iARB(id3,tilesize);
if (glGetError()!=GL_NO_ERROR) printf("Error: vertex texture binding!\n\n");
printf("Create shader texture\n");
//-----------------------------------------------------------------------------------------
// Step 5: Prepare 3D texture for voxelization
GLuint PrimitiveVoxel[3];
glEnable(GL_TEXTURE_3D_EXT);
glGenTextures(1, &PrimitiveVoxel[0]); // x-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[0]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
	//if res <= 2048, the texture can be created directly; otherwise it has to be subdivided
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[1]); // y-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[1]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glGenTextures(1, &PrimitiveVoxel[2]); // z-axis
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[2]);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MIN_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_MAG_FILTER,GL_NEAREST);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_S,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_T,GL_CLAMP);
glTexParameteri(GL_TEXTURE_3D_EXT,GL_TEXTURE_WRAP_R,GL_CLAMP);
//glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, min(layer, maxColorBuffers), 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glTexImage3DEXT(GL_TEXTURE_3D_EXT, 0, GL_RGBA32UI_EXT, res, res, layer, 0, GL_RGBA_INTEGER_EXT, GL_UNSIGNED_INT, 0 );
glBindTexture(GL_TEXTURE_3D_EXT, 0);
//-----------------------------------------------------------------------------------------
// Step 6: Voxelization
GLuint fbo;
int buffersize = min(layer, maxColorBuffers);
int tile;
for(tile=0; tile < z_tile; tile++)
{
for(nAxis=0; nAxis < 3; nAxis++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++) glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[nAxis], 0, a);
printf("tile - %d %d %d \n", z_tile, tile, buffersize);
id4 = glGetUniformLocationARB(g_programObj,"tile");
glUniform1iARB(id4,tile);
glDrawBuffers(buffersize,buffers);
glEnable(GL_DEPTH_TEST);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_LOGIC_OP);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glViewport(0,0,res,res);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-width*0.5,width*0.5,-width*0.5,width*0.5,width*0.5,-width*0.5);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glClearColorIuiEXT(0,0,0,0);
glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glDepthFunc(GL_ALWAYS);
glPushMatrix();
switch(nAxis) {
case 0:{glRotatef(-90,0,1,0); glRotatef(-90,1,0,0); }break;
case 1:{glRotatef(90,0,1,0); glRotatef(90,0,0,1); }break;
}
glCallList(dispListIndex);
glFlush();
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glBindTexture(GL_TEXTURE_3D_EXT, 0);
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glDisable(GL_STENCIL_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glClearColorIuiEXT(0,0,0,0);
glDeleteFramebuffersEXT (1,&fbo);
}
}
glUseProgramObjectARB(0);
glDeleteLists(dispListIndex, 1);
glBindTexture( GL_TEXTURE_RECTANGLE_ARB, 0);
glDeleteTextures(1, &vertexTexture);
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
//-----------------------------------------------------------------------------------------
// Step 7: Build Composite Shader
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeVertexShader.vert");
g_vertexShader = glCreateShaderObjectARB( GL_VERTEX_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
VshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_vertexShader, 1, VshaderString, NULL );
glCompileShaderARB( g_vertexShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_vertexShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_vertexShader, sizeof(str), NULL, str);
printf("Warning: Composite Vertex Shader Compile Error\n\n %s ", str); return false;
}
memset(fileadd,0,256*sizeof(char));
strcat(fileadd,"..\\Shader\\CompositeFragmentShader.frag");
g_FragShader = glCreateShaderObjectARB( GL_FRAGMENT_SHADER_ARB );
ShaderAssembly = _readShaderFile( fileadd );
FshaderString[0] = (char*)ShaderAssembly;
glShaderSourceARB( g_FragShader, 1, FshaderString, NULL );
glCompileShaderARB( g_FragShader);
delete ShaderAssembly;
glGetObjectParameterivARB( g_FragShader, GL_OBJECT_COMPILE_STATUS_ARB, &bCompiled );
if (bCompiled == false) {
glGetInfoLogARB(g_FragShader, sizeof(str), NULL, str);
printf("Warning: Composite Fragment Shader Compile Error\n\n %s", str); return false;
}
g_programObj = glCreateProgramObjectARB();
if (glGetError()!=GL_NO_ERROR) printf("Error: OpenGL! \n\n");
glAttachObjectARB( g_programObj, g_vertexShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Vertex Shader!\n\n");
glAttachObjectARB( g_programObj, g_FragShader ); if (glGetError()!=GL_NO_ERROR) printf("Error: attach Fragment Shader!\n\n");
for(i=0; i < min(layer, maxColorBuffers); i++){
memset(fileadd,0,256*sizeof(char));
strcat(fileadd, "Outdata");
value[0] = '\0';
sprintf(value, "%d", i+1 );
strcat(fileadd, value);
glBindFragDataLocationEXT(g_programObj,i,fileadd);
}
//-------------------------------------------------------------------------------
glLinkProgramARB( g_programObj);
glGetObjectParameterivARB( g_programObj, GL_OBJECT_LINK_STATUS_ARB, &bLinked );
if( bLinked == false ) {
glGetInfoLogARB( g_programObj, sizeof(str), NULL, str );
printf("Linking Fail: %s\n",str); return false;
}
//-----------------------------------------------------------------------------------------
// Step 8: Composite the voxelization result
cudaGraphicsResource *resource;
int t_index = glGetAttribLocation( g_programObj, "in_coord");
CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, cudaGraphicsMapFlagsReadOnly) );
for(tile=0; tile < z_tile; tile++)
{
glGenFramebuffersEXT(1, &fbo);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,fbo);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[2]);
for(int a=tile*maxColorBuffers; a < min(maxColorBuffers,layer-(tile*maxColorBuffers)); a++)
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, buffers[a] ,GL_TEXTURE_3D_EXT, PrimitiveVoxel[2], 0, a);
//CUDA_SAFE_CALL( cudaGraphicsGLRegisterImage(&resource, PrimitiveVoxel[2], GL_TEXTURE_3D, cudaGraphicsMapFlagsReadOnly) );
glUseProgramObjectARB(g_programObj);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[0]);
glDisable(GL_TEXTURE_3D_EXT);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_3D_EXT,PrimitiveVoxel[1]);
glDisable(GL_TEXTURE_3D_EXT);
GLuint fetchXIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureX");
GLuint fetchYIndex = glGetSubroutineIndex(g_programObj, GL_FRAGMENT_SHADER, "FetchTextureY");
GLint tex0;
tex0 = glGetUniformLocationARB(g_programObj,"Xtex");
glUniform1iARB(tex0,0);
tex0 = glGetUniformLocationARB(g_programObj,"Ytex");
glUniform1iARB(tex0,1);
id0 = glGetUniformLocationARB(g_programObj,"res");
glUniform1iARB(id0,res);
glDrawBuffers(min(maxColorBuffers,layer-(tile*maxColorBuffers)),buffers);
glDisable(GL_STENCIL_TEST);
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
glDisable(GL_POLYGON_OFFSET_FILL);
glDisable(GL_POLYGON_OFFSET_LINE);
glDisable(GL_BLEND);
glDisable(GL_POLYGON_SMOOTH); // turn off anti-aliasing
glDisable(GL_POINT_SMOOTH);
glDisable(GL_LINE_SMOOTH);
glDisable(GL_MAP_COLOR);
glDisable(GL_DITHER);
glShadeModel(GL_FLAT);
glDisable(GL_LIGHTING); glDisable(GL_LIGHT0);
glDisable(GL_COLOR_MATERIAL);
glDisable(GL_ALPHA_TEST);
glDisable(GL_LOGIC_OP);
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(GL_OR);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0);
glViewport(0, 0, res, res);
glClearColorIuiEXT(0,0,0,0);
glClear( GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT);
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchXIndex);
float l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
//glCallList(dispListIndex);
//
/*float layer = -1.0-(1.0/(res/128));
glBegin(GL_QUADS);
for(int i=1;i<=(res/128);i++)
{
glTexCoord3i(0 , res , i-1); glVertex3f(-1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, res , i-1); glVertex3f( 1.0f, 1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(res, 0 , i-1); glVertex3f( 1.0f,-1.0f, layer + i*(2.0/(res/128)));
glTexCoord3i(0 , 0 , i-1); glVertex3f(-1.0f,-1.0f, layer + i*(2.0/(res/128)));
}
glEnd();
glFlush();*/
glUniformSubroutinesuiv( GL_FRAGMENT_SHADER, 1, &fetchYIndex);
l = -1.0-(1.0/(tilesize/128));
glBegin(GL_QUADS);
for(int i=tile*maxColorBuffers+1; i<=min(maxColorBuffers,layer-(tile*maxColorBuffers)) ; i++)
{
glVertexAttrib3f(t_index, 0, res, i-1); glVertex3f(-1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, res , i-1); glVertex3f( 1.0f, 1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, res, 0, i-1); glVertex3f( 1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
glVertexAttrib3f(t_index, 0, 0 , i-1); glVertex3f(-1.0f,-1.0f, l + i*(2.0/(tilesize/128)));
}
glEnd();
glFlush();
glDisable(GL_COLOR_LOGIC_OP);
glEnable(GL_POLYGON_OFFSET_FILL);
glEnable(GL_POLYGON_OFFSET_LINE);
glEnable(GL_BLEND);
glEnable(GL_DITHER);
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glEnable(GL_MAP_COLOR);
glShadeModel(GL_SMOOTH);
glEnable(GL_LIGHTING); glEnable(GL_LIGHT0);
glEnable(GL_POINT_SMOOTH);
glDisable(GL_COLOR_LOGIC_OP);
glClearColorIuiEXT(0,0,0,0);
}
glBindTexture(GL_TEXTURE_3D_EXT,0);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT,0);
glDeleteFramebuffersEXT (1,&fbo);
glUseProgramObjectARB(0);
CUDA_SAFE_CALL( cudaGraphicsMapResources( 1, &resource, NULL ) );
cudaArray *in_array;
CUDA_SAFE_CALL( cudaGraphicsSubResourceGetMappedArray( &in_array, resource, 0, 0));
CUDA_SAFE_CALL( cudaBindTextureToArray(site_tex, in_array) );
CUDA_SAFE_CALL( cudaGraphicsUnmapResources( 1, &resource, NULL ) );
printf("Memory Spent %.2f(MB)\n",(res*res*res/8)*1e-6);/**/
//vbosize = LDNIDistanceField_Read3DTextureToVBO(resource, vbo, res, width, origin);
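	// Compact the voxelized sites: count the set bits per (x,y) cell into sites_index,
	// exclusive-scan the counts into start offsets, then write the per-cell site depths
	// into the packed 'sites' array.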
int arrsize = res*res;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites_index, (arrsize+1)*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( sites_index, 0, (arrsize+1)*sizeof(unsigned int)) );
LDNIDistanceField_CountBitInInteger<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites_index, res*res*(res/128), res);
thrust::device_ptr<unsigned int> dev_ptr(sites_index); // Wrap raw pointers with dev_ptr
thrust::exclusive_scan(dev_ptr, dev_ptr+(arrsize+1), dev_ptr); // in-place scan
unsigned int siteNum=dev_ptr[arrsize];
printf("Number of Sites: ----- %d\n",siteNum);
vbosize = siteNum;
CUDA_SAFE_CALL(cudaMalloc((void**) &sites, siteNum*sizeof(unsigned short)));
CUDA_SAFE_CALL(cudaMemset( sites, 0, siteNum*sizeof(unsigned short)) );
unsigned int *counter;
CUDA_SAFE_CALL(cudaMalloc((void**) &counter,sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( counter, 0, sizeof(unsigned int)) );
unsigned int *temp2D;
CUDA_SAFE_CALL(cudaMalloc((void**) &temp2D, arrsize*sizeof(unsigned int)));
CUDA_SAFE_CALL(cudaMemset( temp2D, 0, arrsize*sizeof(unsigned int)) );
LDNIDistanceField__writeTexToArray<<<BLOCKS_PER_GRID,THREADS_PER_BLOCK>>>(sites, res, sites_index, temp2D, res*res*(res/128));
cudaFree(temp2D);
cudaFree(counter);
cudaGraphicsUnregisterResource(resource);
/**/
glDeleteObjectARB( g_vertexShader);
glDeleteObjectARB( g_GeometryShader);
glDeleteObjectARB( g_FragShader);
glDeleteObjectARB( g_programObj);
glDeleteTextures(3, PrimitiveVoxel);
glDisable(GL_TEXTURE_3D_EXT);
glDisable(GL_TEXTURE_RECTANGLE_ARB);
return true;
}
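// Brute-force verification: for every voxel, compares the distance encoded in the PBA
// result against an exhaustive nearest-site search and accumulates the error statistics.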
void LDNIcudaOperation::PBADistanceField_CompareResult(int *inputDF, int res, int numOfSite, int *sites)
{
float totalDistError = 0.0;
float maxDistError = 0.0;
int errorCount = 0;
int dx, dy, dz, nx, ny, nz;
double dist, myDist, correctDist, error;
	int* output = (int*)malloc((size_t)res*res*res*sizeof(int));
	CUDA_SAFE_CALL(cudaMemcpy(output, inputDF, (size_t)res*res*res*sizeof(int), cudaMemcpyDeviceToHost));
for (int i = 0; i < res; i++)
for (int j = 0; j < res; j++)
for (int k = 0; k < res; k++) {
int id = TOID_CPU(i, j, k, res);
DECODE(output[id], nx, ny, nz);
dx = nx - i; dy = ny - j; dz = nz - k;
correctDist = myDist = dx * dx + dy * dy + dz * dz;
//if (output[k*res*res+j*res+i] == 0)
//if (i == 0 && j == 245 && k == 231)
//{
// printf("error~~~~~~~~~ %d %d %d \n", i, j, k);
// printf(" Error!!!!!!!!!!!! %d %d %d %d %d %f \n", res, output[id], nx,ny,nz, myDist);
//}
for (int t = 0; t < numOfSite; t++) {
DECODE(sites[t], nx, ny, nz);
dx = nx - i; dy = ny - j; dz = nz - k;
dist = dx * dx + dy * dy + dz * dz;
if (dist < correctDist)
{
/*if (i == 0 && j == 245 && k == 231)
{
printf("%d %d %f %f %d %d %d \n", t, sites[t], correctDist, dist, nx,ny,nz);
}*/
correctDist = dist;
}
}
if (correctDist != myDist) {
error = fabs(sqrt(myDist) - sqrt(correctDist));
if (i == 0 && j == 245 && k == 231)
{
//printf(" Error!!!!!!!!!!!! %d %d %d \n", i, j, k);
printf(" Error!!!!!!!!!!!! %f %f %f %d %d %d \n", myDist, dist, correctDist, i,j,k);
}
errorCount++;
totalDistError += error;
if (error > maxDistError)
maxDistError = error;
}
}
free(output);
}
//--------------------------------------------------------------------------------------------
// Kernel functions
//--------------------------------------------------------------------------------------------
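// Counts the total number of set bits in a packed bit-array via bitCount + atomicAdd.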
__global__ void LDNIDistanceField_CountBitInArray(unsigned int *d_index, unsigned int *m_3dArray, int nodeNum, int res)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int temp;
int ix,iy,iz;
unsigned int count = 0;
while(tid<nodeNum) {
temp = m_3dArray[tid];
count = bitCount(temp);
atomicAdd(d_index,count);
/*count = 0;
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count= bitCount(temp.x);
count+= bitCount(temp.y);
count+= bitCount(temp.z);
count+= bitCount(temp.w);
//atomicAdd(d_output,count);
atomicAdd(&d_index[iy*res+ix],count);
*/
tid += blockDim.x * gridDim.x;
}
}
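// Counts the set bits of every uint4 texel of the bound 3D site texture and accumulates
// them per (x,y) cell of d_index; these per-cell counts are later prefix-summed into offsets.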
__global__ void LDNIDistanceField_CountBitInInteger(unsigned int *d_index, int nodeNum, int res)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
uint4 temp;
int ix,iy,iz;
unsigned int count = 0;
while(tid<nodeNum) {
count = 0;
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count= bitCount(temp.x);
count+= bitCount(temp.y);
count+= bitCount(temp.z);
count+= bitCount(temp.w);
//atomicAdd(d_output,count);
atomicAdd(&d_index[iy*res+ix],count);
tid += blockDim.x * gridDim.x;
}
}
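// For each (x,y) ray the kernel walks along z, tracking the nearest candidate sites before
// and after the current voxel; voxels within 'offsetPixel' of a site OR a bit into a
// per-thread buffer that is flushed to 'bitSites' with atomicXor every 32 z-slices, sweeping
// the marked disc of columns along x. This implicitly assumes that 2*offsetPixel+1 does not
// exceed THREADS_PER_BLOCK, the size of the local buffer.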
__global__ void LDNIDistanceField__FilterProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{prevSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
}
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
}
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=ix-offsetPixel; i<=ix+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicXor(&bitSites[(iz/32)*res*res+iy*res+i], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
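// Same filtering as the kernel above, but the disc of marked columns is swept along y and
// the distance tests additionally check on which side of the previous/next site the voxel lies.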
__global__ void LDNIDistanceField__FilterProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{currentSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0 && currentSite > 0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel && iz <= currentSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite <= 0)
{
dist2 = abs((int)iz-prevSite);
if(dist2 <= offsetPixel && iz >= prevSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite > 0)
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2 && iz <= prevSite)
//if (dist1 <= dist2 )
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
else if (dist1 > dist2 && iz <= currentSite)
//else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=iy-offsetPixel; i<=iy+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicXor(&bitSites[(iz/32)*res*res+i*res+ix], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
#define LDNIMARKER 1024
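// LDNIMARKER serves as a "no site / infinitely far" sentinel for the kernels below; it presumably
// assumes the volume resolution never exceeds 1024.
// LDNIDistanceField__GetSiteByDist walks each (ix,iy) ray once, keeping the previous and the next
// site along z and the 3D distance to each, and records every z that coincides with a site into
// the sites_off bit mask. Note that the distance test on 'off' against offdist is commented out
// below, so the offdist parameter is effectively unused in this version.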
__global__ void LDNIDistanceField__GetSiteByDist(ushort3 *sites, unsigned int *counter, unsigned int *sites_index, unsigned int *sites_off, int offdist, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz, st, num, ind,i;
ushort3 current_id, prev_id, temp;
unsigned int dist, bitResult, count;
float2 value;
float off;
while(index<nodeNum) {
iy = index%res;
iz = (index%(chunksize*res)/res)/(chunksize/res);
ix = (index/(chunksize*res))*(chunksize/res)+(index%(chunksize*res)%(chunksize)/res);
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num>0) current_id = sites[st];
prev_id = make_ushort3(LDNIMARKER,LDNIMARKER,LDNIMARKER);
ind = 0;
bitResult = 0;
count = 0;
off = 0.0;
}
if (num > 0)
{
if (iz == current_id.x)
{
prev_id = current_id;
ind++;
if (ind >= num)
current_id = make_ushort3(LDNIMARKER,LDNIMARKER,LDNIMARKER);
else
current_id = sites[st+ind];
bitResult = bitResult | SetBitPos(iz%32);
count++;
//if (ix == 334 && iy == 299 )
//printf("id: %d %d %d %d \n", prev_id.x, prev_id.y, prev_id.z, ind);
}
value.x = sqrt((float)((prev_id.x-iz)*(prev_id.x-iz)+(prev_id.y-ix)*(prev_id.y-ix)+(prev_id.z-iy)*(prev_id.z-iy)));
value.y = sqrt((float)((current_id.x-iz)*(current_id.x-iz)+(current_id.y-ix)*(current_id.y-ix)+(current_id.z-iy)*(current_id.z-iy)));
//if (ix == 334 && iy == 299)
//{
// printf("id: %d %d %d %d %d \n", iz, current_id.x, current_id.y, current_id.z, ind);
//for(i=0; i <num; i++)
//{
// temp = sites[st+i];
// printf("id: %d %d %d %d \n", temp.x, temp.y, temp.z, i);
//}
//}
//dist = (value.x < value.y)? value.x:value.y;
off = (value.x < value.y)? value.x:value.y;
//if (ix == 334 && iy == 299 && iz == 301)
//{
// printf("prev: %d %d %d %d %d\n", prev_id.x, prev_id.y, prev_id.z, st, num);
// printf("curr: %d %d %d \n", current_id.x, current_id.y, current_id.z);
// printf("%f %f %f %d %d %d %d \n", off, value.x, value.y, offdist, ix, iy, iz);
//}
//if (off > offdist && off < offdist+1.0)
//{
/*bitResult = bitResult | SetBitPos(iz%32);
count++;*/
//}
if ((iz+1)%32 == 0)
{
sites_off[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
atomicAdd(counter, count);
}
}
index += blockDim.x * gridDim.x;
}
}
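// Sorts the probable sites gathered for one ray by the X component of their packed value (GET_X),
// using a simple in-register exchange sort. The local array assumes at most 256 sites per ray;
// rays with more sites are reported and skipped.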
__global__ void LDNIDistanceField__SortProbablySite2(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int st, num;
short i,j;
unsigned int tempdepth;
unsigned int depth[256];
while(index<nodeNum) {
st = sites_index[index];
num = sites_index[index+1]-st;
if (num > 0)
{
if (num > 256) { printf("Error: too many sites (%u) for the per-thread sort buffer!\n", num); return; }
for(i=0;i<num;i++)
{
depth[i]=sites[st+i];
}
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
if (GET_X(depth[i]) > GET_X(depth[j]) ){
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++)
{
sites[st+i]=depth[i];
//if (index == 143640)
// printf("depth %d %d %d \n", GET_X(depth[i]), GET_Y(depth[i]), GET_Z(depth[i]));
}
}
index += blockDim.x * gridDim.x;
}
}
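// Same exchange sort as above, but keyed on GET_STACK of the packed value; presumably applied to
// the Y-sweep site lists before they are consumed by the merge kernels.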
__global__ void LDNIDistanceField__SortProbablySite(unsigned int *sites, unsigned int *sites_index, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int st, num;
short i,j;
unsigned int tempdepth;
unsigned int depth[256];
while(index<nodeNum) {
st = sites_index[index];
num = sites_index[index+1]-st;
if (num > 0)
{
if (num > 256) { printf("Error: too many sites (%u) for the per-thread sort buffer!\n", num); return; }
/*if (506*res + 256 == index)
printf("num %d \n", num);*/
for(i=0;i<num;i++)
{
depth[i]=sites[st+i];
/* if (506*res + 256 == index)
printf("nnnn %d \n", depth[i]);*/
}
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
//f (depth[i].x>depth[j].x) {
if (GET_STACK(depth[i]) > GET_STACK(depth[j]) ){
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++)
{
//if (tz == 250 && tx == 431 )
//if (index == 220922)
// printf("%d %d %d \n", i, GET_STACK(depth[i]), GET_PTR(depth[i]));
sites[st+i]=depth[i];
}
}
/*else
{
printf("no site %d \n", index);
}*/
index += blockDim.x * gridDim.x;
}
}
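// Gathers probable sites for the Y sweep. Each thread owns BANDWIDTH consecutive y rows; for every
// row it tracks the current/next site along z and the midpoint between them, so the closest site
// for the current z can be read without searching. Rows whose bit survives in ~bitDeleted are
// packed as ENCODE_STACK(row, closest_site_z) and appended to the per-(ix,iz) list through an
// atomicAdd'ed write cursor in 'counter'.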
__global__ void LDNIDistanceField__GetProbablySiteInY(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned short *sites_x, unsigned int *sites_index_x, int3 res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz;
unsigned int bitresult, st_y, num_y;
//short current_id, prev_id, dist;
short middle_id[BANDWIDTH], ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, k, count;
unsigned int st[BANDWIDTH], stack[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index_x[(j+i)*res.x+ix];
num[i] = sites_index_x[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites_x[st[i]];
next_id[i] = sites_x[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites_x[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
bitresult = ~bitDeleted[iy*res.x*res.z+ix*res.x+iz];
//if (__popc(bitresult)>0)
//{
count = 0;
/*if (ix == 32 && iz == 1)
printf("test test %d %d %d %d \n", ix, iy, iz, __popc(bitresult));*/
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0 && GetBitPos(i, bitresult))
{
if (iz < middle_id[i])
{
stack[count] = ENCODE_STACK(iy*BANDWIDTH+i, current_id[i]);
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 65 && iy == 3 && i == 8 )
// printf("test test %d %d %d \n", stack[count], current_id[i], iy*BANDWIDTH+i );
}
else
{
if (ind[i] < num[i])
{
k = sites_x[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + k)/2.0);
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[count] = ENCODE_STACK(iy*BANDWIDTH+i, current_id[i]);
}
count++;
}
}
//if (ix == 32 && iz == 1)
// printf("@@@ %d %d %d %d \n", ix, iy, iz, count);
//if (ix == 256 && iy == 5 && iz == 508)
// printf("test test %d %d \n", count, st_y);
st_y = sites_index[ix*res.x+iz];
i = atomicAdd(&counter[ix*res.x+iz],count);
for(j=0; j < count ; j++)
{
sites[st_y+i+j] = stack[j];
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 25 && iz == 250)
// printf("@@ %d %d %d %d \n", j, i, GET_STACK(stack[j]), GET_PTR(stack[j]));
}
//}
tid += blockDim.x * gridDim.x;
}
}
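// X-sweep analogue of the kernel above. Incoming sites are already packed pairs (GET_STACK /
// GET_PTR), so the midpoint test uses middlepointY() on the packed coordinates, and surviving
// entries are re-packed with ENCODE_STACK_3(row, stack, ptr) before being appended to the
// per-(ix,iz) list.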
__global__ void LDNIDistanceField__GetProbablySiteInX(unsigned int *bitDeleted, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, unsigned int *sites_in, unsigned int *sites_index_in, int3 res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ix,iy,iz;
unsigned int bitresult, st_y, num_y;
unsigned int current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], ind[BANDWIDTH], i;
int j, k, count, temp;
unsigned int st[BANDWIDTH], stack[BANDWIDTH];
int middle_id[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index_in[(j+i)*res.x+ix];
num[i] = sites_index_in[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites_in[st[i]];
next_id[i] = sites_in[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites_in[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
bitresult = ~bitDeleted[iy*res.x*res.z+ix*res.x+iz];
//if (__popc(bitresult)>0)
//{
count = 0;
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
if ((int)iz < middle_id[i])
{
if (GetBitPos(i, bitresult))
{
stack[count] = ENCODE_STACK_3(iy*BANDWIDTH+i, GET_STACK(current_id[i]), GET_PTR(current_id[i]));
count++;
}
}
else
{
if (ind[i] < num[i])
{
j = sites_in[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites_in[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos(i, bitresult))
{
stack[count] = ENCODE_STACK_3(iy*BANDWIDTH+i, GET_STACK(current_id[i]), GET_PTR(current_id[i]));
count++;
/*if (ix == 311 && iz == 256 && iy == 3 )
{
printf("middle %d %d %d %d %d %d %d %d %d %d\n", count, i,iy*BANDWIDTH+i, bitresult, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), GET_X(stack[]) );
}*/
}
}
//if (ix == 311 && iy == 9 && i == 0)
//{
//for(int test = 0; test < num[i]; test++)
//for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites_in[st[i]]+test, sites_in[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites_in[st[i]+test]), GET_PTR(sites_in[st[i]+test]));
//printf("%d %d %d \n", num[i], GET_STACK(sites_in[st[i]+test]), GET_PTR(sites_in[st[i]+test]));
//printf("%d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]));
//}
}
}
//if (ix == 32 && iz == 1)
// printf("@@@ %d %d %d %d \n", ix, iy, iz, count);
//if (ix == 256 && iy == 5 && iz == 508)
// printf("test test %d %d \n", count, st_y);
st_y = sites_index[ix*res.x+iz];
i = atomicAdd(&counter[ix*res.x+iz],count);
for(j=0; j < count ; j++)
{
sites[st_y+i+j] = stack[j];
//if (ix == 256 && iy == 5 && iz == 508)
//if (ix == 280 && iz == 280 && iy < 6)
// printf("@@ %d %d %d %d %d %d %d\n", bitresult, iy, j, i, GET_X(stack[j]), GET_Y(stack[j]), GET_Z(stack[j]));
//if (GET_X(stack[j]) == 25 && GET_Y(stack[j]) == 329 && GET_Z(stack[j]) == 293)
// printf("?? %d %d %d \n", ix, iy, iz);
}
tid += blockDim.x * gridDim.x;
}
}
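// Counts, for every (ix,iy) column, how many bits survive in ~bitDeleted across all res/32 words
// (via __popc) and accumulates the total into 'counter'; the counts presumably feed the
// prefix-sum index used by the gather kernels above.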
__global__ void LDNIDistanceField__CountProbablySiteInY(unsigned int *bitDeleted, unsigned int *counter, int res, int nodeNum)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix, iy, count = 0, value;
short i;
while(index<nodeNum) {
ix = index%res;
iy = index/res;
count = 0;
for (i = 0; i < res/32; i++)
{
value = ~bitDeleted[i*res*res+iy*res+ix];
count += __popc(value);
}
///if (ix == 0 && iy < 32)
// printf("no site !!! %d %d %d\n", ix, iy, count);
atomicAdd(&counter[iy*res+ix],count);
index += blockDim.x * gridDim.x;
}
}
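// Loop step of the X-direction pruning. For row iy it builds the nearest site (in the x-z plane)
// of rows iy-1, iy and iy+loopID, compares the two Voronoi intersections returned by interpointY,
// and when they cross (x1 >= x2) sets the candidate bit: the result is stored in bitForNextLoop at
// row iy for the next iteration and OR-ed into bitDeleted at row iy+loopID-1. 'counter' counts how
// many candidates were produced, which presumably drives the host-side termination test.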
__global__ void LDNIDistanceField__GenerateProbablySiteInXLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID)
{
unsigned int index=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
ushort2 current_id[3];
short ind[3];
ushort2 prev_id[3];
unsigned int st[3], num[3], bitResult, bitCheck;
float x1, x2;
ushort2 p[3];
unsigned int count;
while(index<nodeNum) {
iy = index%res;
iz = (index%(chunksize*res)/res)/(chunksize/res);
ix = (index/(chunksize*res))*(chunksize/res)+(index%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
if ((iy+loopID) < res)
{
st[2] = sites_index[(iy+loopID)*res+ix];
num[2] = sites_index[(iy+loopID)*res+ix+1]-st[2];
}
else
{
st[2] = 0;
num[2] = 0;
}
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
//if (ix == 26 && iy == 25)
// printf("%d %d %d %d %d %d \n", num[0], num[1], num[2], current_id[0], current_id[1], current_id[2]);
prev_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy-1
prev_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy
prev_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER); //iy+loopID
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
bitCheck = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz%32 == 0)
{
bitCheck = bitForNextLoop[(iz/32)*res*res+iy*res+ix];
}
//if (ix == 125 && iy == 256)
// printf("%d %d %d %d\n", iz, bitCheck,GetBitPos(iz%32, bitCheck) );
if (iz != current_id[1].x)
{
if ( GetBitPos(iz%32, bitCheck))
{
p[0] = ((prev_id[0].x-iz)*(prev_id[0].x-iz)+(prev_id[0].y-ix)*(prev_id[0].y-ix) < (current_id[0].x-iz)*(current_id[0].x-iz)+(current_id[0].y-ix)*(current_id[0].y-ix))? prev_id[0]:current_id[0];
p[1] = ((prev_id[1].x-iz)*(prev_id[1].x-iz)+(prev_id[1].y-ix)*(prev_id[1].y-ix) < (current_id[1].x-iz)*(current_id[1].x-iz)+(current_id[1].y-ix)*(current_id[1].y-ix))? prev_id[1]:current_id[1];
p[2] = ((prev_id[2].x-iz)*(prev_id[2].x-iz)+(prev_id[2].y-ix)*(prev_id[2].y-ix) < (current_id[2].x-iz)*(current_id[2].x-iz)+(current_id[2].y-ix)*(current_id[2].y-ix))? prev_id[2]:current_id[2];
x1 = interpointY(iy-1, p[0].x, p[0].y, iy, p[1].x, p[1].y, ix, iz) ;
x2 = interpointY(iy, p[1].x, p[1].y, iy+loopID, p[2].x, p[2].y, ix, iz) ;
if (x1 >= x2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0].x)
{
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2].x)
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
atomicOr(&bitDeleted[(iz/32)*res*res+(iy+loopID-1)*res+ix], bitResult);
bitResult = 0;
}
if (iz == res-1)
{
atomicAdd(counter, count);
}
}
}
index += blockDim.x * gridDim.x;
}
}
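// Y-direction counterpart of the loop kernel above. Sites here are plain z values, so the nearest
// site per row is chosen by 1D distance along z, and the same intersection test (y1 >= y2) decides
// whether the bit for row iy+loopID-1 is set.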
__global__ void LDNIDistanceField__GenerateProbablySiteInYLoop(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum, short loopID)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short current_id[3], ind[3];
short prev_id[3];
unsigned int st[3], num[3], bitResult, bitCheck;
float y1, y2;
short z[3];
unsigned int count;
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
if ((iy+loopID) < res)
{
st[2] = sites_index[(iy+loopID)*res+ix];
num[2] = sites_index[(iy+loopID)*res+ix+1]-st[2];
}
else
{
st[2] = 0;
num[2] = 0;
}
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
//if (ix == 26 && iy == 25)
// printf("%d %d %d %d %d %d \n", num[0], num[1], num[2], current_id[0], current_id[1], current_id[2]);
prev_id[0] = LDNIMARKER; //iy-1
prev_id[1] = LDNIMARKER; //iy
prev_id[2] = LDNIMARKER; //iy+loopID
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
bitCheck = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0 )//&& ix == 125 && ((iy <= 252 && iy>=200)))
{
if (iz%32 == 0)
{
bitCheck = bitForNextLoop[(iz/32)*res*res+iy*res+ix];
//if (ix == 26 && iy == 25)
// printf("%d %d \n", iz, bitCheck);
}
//if (ix == 125 && iy == 256)
// printf("%d %d %d %d\n", iz, bitCheck,GetBitPos(iz%32, bitCheck) );
if (iz != current_id[1])
{
if ( GetBitPos(iz%32, bitCheck))
{
z[0] = (abs((int)(prev_id[0]-iz)) < abs((int)(current_id[0]-iz)))? prev_id[0]:current_id[0];
z[1] = (abs((int)(prev_id[1]-iz)) < abs((int)(current_id[1]-iz)))? prev_id[1]:current_id[1];
z[2] = (abs((int)(prev_id[2]-iz)) < abs((int)(current_id[2]-iz)))? prev_id[2]:current_id[2];
y1 = interpointY(ix, iy-1, z[0], ix, iy, z[1], ix, iz) ;
y2 = interpointY(ix, iy, z[1], ix, iy+loopID, z[2], ix, iz) ;
if (y1 >= y2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
/*if (ix == 26 && iy == 25)
{
printf("%d %d %d %d %f %f %d\n", iz, z[0], z[1], z[2], y1, y2, count);
printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
}*/
/*if (ix == 125 && iy == 251 && iz == 211)
{
printf("%d %d %d %d %d %f %f %d\n", iy, iz, z[0], z[1], z[2], y1, y2, count);
//printf("a) %d %d %d %d %d %d %d %d \n",ix, iy-1, z[0], ix, iy, z[1], ix, iz);
//printf("b) %d %d %d %d %d %d %d %d \n",ix, iy, z[1], ix, iy+loopID, z[2], ix, iz);
//printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
}*/
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = LDNIMARKER;
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0])
{
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = LDNIMARKER;
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2])
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = LDNIMARKER;
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
atomicOr(&bitDeleted[(iz/32)*res*res+(iy+loopID-1)*res+ix], bitResult);
bitResult = 0;
}
if (iz == res-1)
{
//if (count > 0)
// printf("%d %d %d\n", ix, iy, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
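// Initial (single-step) pass of the X pruning: only the immediate neighbours iy-1 and iy+1 are
// considered for every z, and both bitForNextLoop and bitDeleted are initialised directly from the
// result instead of being OR-ed.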
__global__ void LDNIDistanceField__GenerateProbablySiteInX(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, ushort2 *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
ushort2 current_id[3];
short ind[3];
ushort2 prev_id[3];
unsigned int st[3], num[3], bitResult;
float x1, x2;
ushort2 p[3];
int count=0;
while(tid<nodeNum) {
iy = tid%res; // x axis
iz = (tid%(chunksize*res)/res)/(chunksize/res); // y axis
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res); // z axis
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
st[2] = sites_index[(iy+1)*res+ix];
num[2] = sites_index[(iy+1)*res+ix+1]-st[2];
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
prev_id[0] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy-1
prev_id[1] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy
prev_id[2] = make_ushort2(LDNIMARKER,LDNIMARKER); //iy+1
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
count = 0;
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz != current_id[1].x)
{
p[0] = ((prev_id[0].x-iz)*(prev_id[0].x-iz)+(prev_id[0].y-ix)*(prev_id[0].y-ix) < (current_id[0].x-iz)*(current_id[0].x-iz)+(current_id[0].y-ix)*(current_id[0].y-ix))? prev_id[0]:current_id[0];
p[1] = ((prev_id[1].x-iz)*(prev_id[1].x-iz)+(prev_id[1].y-ix)*(prev_id[1].y-ix) < (current_id[1].x-iz)*(current_id[1].x-iz)+(current_id[1].y-ix)*(current_id[1].y-ix))? prev_id[1]:current_id[1];
p[2] = ((prev_id[2].x-iz)*(prev_id[2].x-iz)+(prev_id[2].y-ix)*(prev_id[2].y-ix) < (current_id[2].x-iz)*(current_id[2].x-iz)+(current_id[2].y-ix)*(current_id[2].y-ix))? prev_id[2]:current_id[2];
x1 = interpointY(iy-1, p[0].x, p[0].y, iy, p[1].x, p[1].y, ix, iz) ;
x2 = interpointY(iy, p[1].x, p[1].y, iy+1, p[2].x, p[2].y, ix, iz) ;
if (x1 >= x2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0].x)
{
//if (ix == 125 && iy == 256)
// printf("--------------\n");
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2].x)
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = make_ushort2(LDNIMARKER, LDNIMARKER);
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
bitDeleted[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
//if (iy==256)
// printf("count %d %d \n", ix, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
//#define ENCODE_16BIT(a, b) (((a) << 8) | (b))
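// Warp-level band merge along Y with 32 rows per thread. Each thread keeps, for every row of its
// band, the closest site packed as ENCODE_STACK(site_z, previous_row); __shfl reads the stacks and
// intersection values owned by other lanes, __ballot detects whether any lane still sees a
// dominated site (y1 >= y2), and each loop iteration removes one dominated site until no crossing
// remains. The _16/_8/_4/_2 kernels below appear to be the same algorithm unrolled for smaller
// bands, differing mainly in the width of the band mask.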
__global__ void LDNIDistanceField__kernelMergeBandsY_32(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[32], ind[32], current_id[32], next_id[32];
float y_inter[32];
short num[32], i, j, k, count;
unsigned int st[32];
unsigned int stack[32]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
// with 32 rows per band the whole 32-bit word appears to belong to this thread, so no band mask is applied
mask = bitresult;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // 0-based index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
// link each band to the last set bit of the preceding band (this step could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty from a preceding band
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep a record of the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (ix == 206 && iz == 300 )
{
printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (ix == 206 && iz == 300 )
{
printf("2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
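// 16-rows-per-thread variant of the band merge; same logic as above, with the band-local bits
// selected by a 16-bit mask (0x0000ffff shifted to this thread's band).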
__global__ void LDNIDistanceField__kernelMergeBandsY_16(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[16], ind[16], current_id[16], next_id[16];
float y_inter[16];
short num[16], i, j, k, count;
unsigned int st[16];
unsigned int stack[16]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 65535 << bandNum*(iy%warpWidth); // 65535 = 0x0000ffff
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // 0-based index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
// link each band to the last set bit of the preceding band (this step could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty from a preceding band
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep a record of the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("16-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("16-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
//if (ix == 503 && iz == 64)
// printf("-- %d %d \n", iy/warpWidth, ~bitresult);
}
tid += chunksize;
}
}
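// 8-rows-per-thread variant of the band merge (band mask 0x000000ff).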
__global__ void LDNIDistanceField__kernelMergeBandsY_8(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[8], ind[8], current_id[8], next_id[8];
float y_inter[8];
short num[8], i, j, k, count;
unsigned int st[8];
unsigned int stack[8]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 255 << bandNum*(iy%warpWidth); // 255 = 0x000000ff
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // 0-based index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
// link each band to the last set bit of the preceding band (this step could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty from a preceding band
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep a record of the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("8-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("8-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
//if (test > 40) break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
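// 4-rows-per-thread variant of the band merge (band mask 0x0000000f).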
__global__ void LDNIDistanceField__kernelMergeBandsY_4(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[4], ind[4], current_id[4], next_id[4];
float y_inter[4];
short num[4], i, j, k, count;
unsigned int st[4];
unsigned int stack[4]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
if (iy%warpWidth == 0)
{
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 15 << bandNum*(iy%warpWidth); // 15 = 0x0000000f
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // 0-based index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
// link each band to the last set bit of the preceding band (this step could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty from a preceding band
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep a record of the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (iz == 1 && ix == 32 && (iy*bandNum+i)==326)
{
printf("4=! %d %d %d %d %d %f %d \n", i, iy , iy%32, lasty, bitresult, y1, (lasty%(warpSize*bandNum))/(bandNum));
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("4-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("4-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
//if (iz == 1 && ix == 32)
//{
// printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
//}
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
//if (iz == 1 && ix == 32)
//{
// printf("5=! %d %d %d %d %f\n", j, iy , iy%32, count, y_inter[j%bandNum]);
//}
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
//if (iz == 1 && ix == 32 && (i >= 300 && i <= 304))
// printf("7=! %d %d %d %d %f\n", i, iy , iy%32, count, y_inter[i%bandNum]);
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
//if (test > 40) break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
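// 2-rows-per-thread variant of the band merge (band mask 0x3). Unlike the wider variants, a
// dominated row here only inherits the intersection value of its immediate predecessor (there is
// no forward propagation loop), which appears to rely on the band being only two rows wide.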
__global__ void LDNIDistanceField__kernelMergeBandsY_2(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[2], ind[2], current_id[2], next_id[2];
float y_inter[2];
short num[2], i, j, k, count;
unsigned int st[2];
unsigned int stack[2]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
//if (ix == 65 && j+i == 104 )
// printf(" %d %d %d %d %d %d\n", iz, num[i], stack[i], current_id[i], middle_id[i], sites[st[i]]);
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%2)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 3 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (bitCount(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize + (32 - __ffs(__brev(mask))); // 0-based index of the most significant set bit (__ffs returns 1..32)
}
else
{
lasty = -1;
}
// link each band to the last set bit of the preceding band (this step could be optimized)
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid lasty from a preceding band
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (iz < middle_id[i])
stack[i] = ENCODE_STACK(current_id[i], lasty);
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + j)/2.0);
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = ENCODE_STACK(current_id[i], lasty);
}
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep a record of the last current_id
}
else
{
//stack[i] = ENCODE_STACK(-1, lasty);
stack[i] = ENCODE_STACK(k, lasty); // the last element in array = the last site
}
}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
lasty = GET_PTR(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
else
{
y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
}
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
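// repeat until no warp reports a violation: each pass locates (per warp) one row whose
// predecessor's intersection is not below its own (y1 >= y2), clears that predecessor
// from bitresult and re-links the chain with warp shuffles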
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_PTR(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
//if (iz == 250 && ix == 431 && (iy*bandNum+i) == 96)
//{
// printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431 )
//{
// printf("2-1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
//if (iz == 250 && ix == 431)
//{
// printf("2-2=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_PTR(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
/*
while (mask != 0)
{
lasty = __ffs(mask);
count = 0;
for(i=0; i < bandNum ; i++)
{
j = GET_PTR(stack[i]);
if ( j > 0)
{
lasty = __shfl(y_inter[j%2], (j%(warpSize*bandNum))/(bandNum));
if (y_inter[i] < lasty)
{
j = GET_PTR(lasty);
lasty = __shfl((int)stack[j%2], (j%(warpSize*bandNum))/(bandNum));
y_inter[i] = interpointY(ix, j, GET_STACK(lasty), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
lasty = GET_STACK(stack[i]);
stack[i] = ENCODE_STACK(lasty, k);
count++;
break;
}
}
}
mask = __ballot(count > 0);
}
//------------------------------------------------------
// Store the result
bitresult = 0;
lasty = -1;
if (iy%(warpSize/bandNum) == 0)
{
k = (iy/(warpSize/bandNum))%(bandNum);
lasty = -1;
for(j=warpSize/bandNum-1; j > 0 ; j--)
{
for(i=bandNum; i > 0 ; i--)
{
mask = __shfl((int)stack[i], k*(warpSize/bandNum)+j);
lasty = GET_PTR(mask);
if (lasty >= 0)
{
bitresult = bitresult | SetBitPos(j*bandNum+i);
break;
}
}
if (lasty >= 0) break;
}
while (lasty >= 0)
{
j = lasty%(warpSize*bandNum)/bandNum;
if (j/(warpSize/bandNum) != k) break;
mask = __shfl((int)stack[lasty%2], j);
lasty = GET_PTR(mask);
if (lasty > 0)
{
bitresult = bitresult | SetBitPos(j*bandNum+(lasty%bandNum));
}
}
if (k+1 < bandNum)
{
lasty = __shfl(lasty, (k+1)*(warpSize/bandNum));
if (lasty > 0)
{
bitresult = bitresult & (!(SetBitPos(lasty%32)-1));
}
}
bitDeleted[(iy/(warpSize/bandNum))*res.x*res.z+ix*res.x+iz] = bitresult;
}*/
tid += chunksize;
}
}
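//------------------------------------------------------
// LDNIDistanceField__MaurerAxisInY: one thread handles one (ix, BANDWIDTH-row band) column
// and sweeps it through the iz slices (its per-row state is initialised at iz == 0 and
// carried while tid advances by chunksize). For every row it tracks the site currently
// closest along the sweep (middle_id marks where that closest site changes), links
// non-empty rows with the ptr[] back pointers, deletes rows whose site is dominated
// (interpointY test) and stores the deleted/empty rows of the band as a bit mask in
// bitDeleted.
//------------------------------------------------------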
__global__ void LDNIDistanceField__MaurerAxisInY(unsigned int *bitDeleted, unsigned short *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short middle_id[BANDWIDTH], ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, k;
unsigned int st[BANDWIDTH];
short stack[BANDWIDTH], count;
//unsigned int bitresult[BANDWIDTH];
unsigned int bitresult;
float y1, y2;
short ptr[BANDWIDTH];
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
}
}
count = 0;
k = -1;
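// build the per-band stack: stack[i] = closest site (along the sweep) for row j+i,
// ptr[i] = index of the previous non-empty row in this band; empty rows are marked
// directly in bitresult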
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
ptr[i] = k;
if (iz < middle_id[i])
stack[i] = current_id[i];
else
{
if (ind[i] < num[i])
{
k = sites[st[i]+ind[i]];
ind[i]++;
middle_id[i] = ceil((next_id[i] + k)/2.0);
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = current_id[i];
}
count++;
k = i;
}
else
{
stack[i] = -1;
ptr[i] = k;
bitresult = bitresult | SetBitPos(i);
}
//if (iz == 250 && ix == 431 && iy == 3)
/*if (ix == 65 && iy == 3 && i == 8 )
printf(" %d %d %d %d %d %d\n", iz, num[i], stack[i], current_id[i], middle_id[i], sites[st[i]]);*/
}
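// with at least three candidate rows, apply the Maurer stack test: a row whose
// intersection with its predecessor lies at or above its intersection with the next
// row cannot own a Voronoi cell in this slice, so it is deleted (bit set in bitresult)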
if (count > 2)
{
k=0;
for(i=0; i < BANDWIDTH ; i++)
{
if (stack[i] > 0)
{
if (k < 2)
{
k++;
continue;
}
while (k>=2)
{
y1 = interpointY(ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz) ;
y2 = interpointY(ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz) ;
if (y1 < y2)
break;
//if (iz == 250 && ix == 431 && iy < 4)
//{
// printf("ptr %d %f %f %d %d %d\n", j+i, y1, y2, k, j+ptr[i], stack[ptr[i]]);
//printf("y1 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz);
//printf("y2 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz);
//}
//if (ix == 256 && (j+i) == 178 && iz == 0)
k--;
stack[ptr[i]] = -1;
bitresult = bitresult | SetBitPos(ptr[i]);
ptr[i] = ptr[ptr[i]];
}
k++;
}
}
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (iz == 250 && ix == 431 && iy < 4)
//if (ix == 256 && iz ==0)
// printf("--------------%d %d \n", iy, bitresult, count );
//for(i=0; i < BANDWIDTH ; i++)
//{
// bitDeleted[iy*res*res+ix*res+iz]
//}
}
else
{
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d %d\n", iy, bitresult, count );
}
tid += blockDim.x * gridDim.x;
}
}
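//------------------------------------------------------
// LDNIDistanceField__kernelMergeBandsX_{32,16,8,4,2}: band-merging passes along X.
// Each variant assigns bandNum rows of a 32-row band to one thread (warpWidth threads
// then share one 32-bit deletion mask), re-links the surviving sites across band
// boundaries with warp shuffles and updates bitDeleted. The five variants differ only
// in bandNum and in the lane mask used to locate the last surviving bit of a band.
//
// Minimal host-side sketch of how these passes could be chained, kept as a comment
// because it is an assumption for illustration only (launch configuration, pass order
// and the warpWidth values are not taken from the original host code):
//
// LDNIDistanceField__kernelMergeBandsX_2 <<<blocks,threads>>>(bitDeleted, sites, sites_index, res, offset, 2, 16, nodeNum);
// LDNIDistanceField__kernelMergeBandsX_4 <<<blocks,threads>>>(bitDeleted, sites, sites_index, res, offset, 4, 8, nodeNum);
// LDNIDistanceField__kernelMergeBandsX_8 <<<blocks,threads>>>(bitDeleted, sites, sites_index, res, offset, 8, 4, nodeNum);
// LDNIDistanceField__kernelMergeBandsX_16<<<blocks,threads>>>(bitDeleted, sites, sites_index, res, offset, 16, 2, nodeNum);
// LDNIDistanceField__kernelMergeBandsX_32<<<blocks,threads>>>(bitDeleted, sites, sites_index, res, offset, 32, 1, nodeNum);
//------------------------------------------------------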
__global__ void LDNIDistanceField__kernelMergeBandsX_32(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[32], next_id[32];
float y_inter[32];
short ind[32], num[32], i;
int j, k, count;
unsigned int st[32];
int stack[32]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[32], temp;
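// stack[i] packs three fields via ENCODE_STACK_3: the two coordinates of the closest
// site for this row (read back with GET_X/GET_Y) and the index of the previous
// surviving row (GET_Z), which chains the rows handled by a warp together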
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
//mask = 65535 << bandNum*(iy%warpWidth);
mask = bitresult;// & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid last-site index
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
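//------------------------------------------------------
// 16-band variant: same algorithm as LDNIDistanceField__kernelMergeBandsX_32 above,
// with bandNum = 16 and the 0xFFFF lane mask selecting the last bit of each band.
//------------------------------------------------------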
__global__ void LDNIDistanceField__kernelMergeBandsX_16(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[16], next_id[16];
float y_inter[16];
short ind[16], num[16], i;
int j, k, count;
unsigned int st[16];
int stack[16]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[16], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 65535 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid last-site index
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 256 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
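//------------------------------------------------------
// 8-band variant: same algorithm as above, with bandNum = 8 and a 0xFF lane mask.
//------------------------------------------------------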
__global__ void LDNIDistanceField__kernelMergeBandsX_8(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[8], next_id[8];
float y_inter[8];
short ind[8], num[8], i;
int j, k, count;
unsigned int st[8];
int stack[8]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[8], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 255 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid last-site index
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
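//------------------------------------------------------
// 4-band variant: same algorithm as above, with bandNum = 4 and a 0xF lane mask.
//------------------------------------------------------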
__global__ void LDNIDistanceField__kernelMergeBandsX_4(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[4], next_id[4];
float y_inter[4];
short ind[4], num[4], i;
int j, k, count;
unsigned int st[4];
int stack[4]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[4], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%bandNum)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 15 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (__popc(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid last-site index
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (ix == 250 && iy*bandNum+i == 78)
//{
// printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
}
//------------------------------------------------------
// Calculate intersection point for each site
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
/*if (iz == 280 && ix == 280 && iy == 30 )
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48 || iy == 49))
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
/*if (iz == 280 && ix == 280 && iy == 30 )
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
}
else
{
y_inter[i] = y1;
}
}
//------------------------------------------------------
// Start warp diverge
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 311 && ix == 500 )
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
if ((i-1) >= 0)
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
for(count=i%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[i%bandNum];
else break;
}
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
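//------------------------------------------------------
// 2-band variant: same algorithm as above, with bandNum = 2 and a 0x3 lane mask.
//------------------------------------------------------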
__global__ void LDNIDistanceField__kernelMergeBandsX_2(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, const int bandNum, const int warpWidth, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
//unsigned int ix,iy,iz;
int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
int current_id[2], next_id[2];
float y_inter[2];
short ind[2], num[2], i;
int j, k, count;
unsigned int st[2];
int stack[2]; // stack + ptr
unsigned int bitresult = 0;
int lasty;
float y1, y2;
unsigned int mask = 0;
bool loopflag = false;
int middle_id[2], temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
/*if (iz > 280)
{
tid += chunksize;
continue;
}*/
if (iz == 0)
{
j = iy*bandNum;
for(i=0; i < bandNum; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (ix == 250 && j+i == 78 && iz == 0)
//{
//printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
// for(int test = 0; test < num[i] ; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//}
}
}
if (iy%warpWidth == 0)
{
//bitresult[(iy/(BANDWIDTH/bandNum))%2] = bitDeleted[(iy/(BANDWIDTH/bandNum))*res.x*res.z+ix*res.x+iz];
bitresult = ~bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz];
}
// broadcast the bitresult to the other threads in the warp
bitresult = __shfl((int)bitresult, ((iy/warpWidth)%2)*warpWidth);
count = 0;
lasty = -1;
//define last bit for each band
//------------------------------------------------------
mask = 3 << bandNum*(iy%warpWidth);
mask = bitresult & mask;
if (bitCount(mask) > 0)
{
lasty = (iy/warpWidth)*warpSize
+ (32 - __ffs(__brev(mask))); // Get the most significant bit (__ffs return 1 - 32)
}
else
{
lasty = -1;
}
//link the last bit for each band ** could be optimized
//------------------------------------------------------
k = 0;
//lasty = __shfl(lasty, max(0,iy%32-1));
k = __shfl(lasty, (int)(iy%32-1));
if ((int)(iy%32-1) >= 0) lasty = k;
mask = __all(lasty >= 0);
k = 0;
while (mask == 0)
{
j = __shfl(lasty, (int)(iy%32-1));
if (lasty < 0 && (int)(iy%32-1) >= 0)
{
lasty = j;
}
k++;
mask = __all(lasty >= 0); // make sure every thread has obtained a valid last-site index
if (k >= warpSize) break; // safety guard; should not happen
}
if (iy%32 == 0)
lasty = -1;
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d \n", i, iy , iy%32, lasty, bitresult, num[0], num[1]);
}*/
k = -1; temp = 0;
//------------------------------------------------------
// define stack (closest site on z axis and the pointer to previous site)
for(i=0; i < bandNum ; i++)
{
//if (GetBitPos((iy*bandNum+i)%32, bitresult))
//{
if ((int)iz < middle_id[i])
{
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
else
{
if (ind[i] < num[i])
{
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
while (temp <= middle_id[i])
{
next_id[i] = j;
j = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], j, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = j;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
count++;
lasty = iy*bandNum + i;
k = current_id[i]; // keep record the last current_id
}
else
stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
}
//if (iz == 280 && ix == 280 && iy*bandNum+i == 64)
//{
// printf("^^^^ %d %d %d %d %d %d %d %d %d %d %d\n",bitresult, iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i], GET_X(stack[i]), GET_Y(stack[i]), GET_Z(stack[i]) );
//for(int test = 0; test < num[i]; test++)
// printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
//printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
//}
// lasty = iy*bandNum + i;
//k = current_id[i]; // keep record the last current_id
//}
//else
//{
// stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
//}
}
//for(i=0; i < bandNum ; i++)
//{
// if (GetBitPos((iy*bandNum+i)%32, bitresult))
// {
// if ((int)iz < middle_id[i])
// stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
//
// else
// {
// if (ind[i] < num[i])
// {
// j = sites[st[i]+ind[i]];
// ind[i]++;
// temp = middlepointY(next_id[i], j, ix);
// while (temp <= middle_id[i])
// {
// next_id[i] = j;
// j = sites[st[i]+ind[i]];
// ind[i]++;
// temp = middlepointY(next_id[i], j, ix);
//
// }
//
// middle_id[i] = temp;
// current_id[i] = next_id[i];
// next_id[i] = j;
// }
// else
// {
// middle_id[i] = LDNIMARKER;
// current_id[i] = next_id[i];
// }
// stack[i] = ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty);
//
//
// }
// count++;
//
// //if (ix == 250 && iy*bandNum+i == 78)
// //{
// // printf("^^^^ %d %d %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty, ENCODE_STACK_3(GET_STACK(current_id[i]), GET_PTR(current_id[i]), lasty), stack[i] );
// //for(int test = 0; test < num[i]; test++)
// // printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*bandNum+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
// //printf("%d %d \n",GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]) );
// //}
//
// lasty = iy*bandNum + i;
// k = current_id[i]; // keep record the last current_id
//
//
//
// }
// else
// {
// stack[i] = ENCODE_STACK_3(GET_STACK(k), GET_PTR(k), lasty); // the last element in array = the last site
// }
//
//}
//------------------------------------------------------
// Calculate intersection point for each site
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(mask),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (ix == 256 && iz == 0 && iy == 65)
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
y1 = -1 * res.x* res.x;
for(i=0; i < bandNum ; i++)
{
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
mask = __shfl((int)stack[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum)); // always get the last element in array for the last site of other thread
/*if (ix == 280 && iz == 280 && iy*bandNum+i==116)
{
//printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
printf("%d %d %d \n", lasty, mask,GetBitPos((iy*bandNum+i)%32, bitresult) );
}*/
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
//if (lasty < res.x && GET_STACK(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
if (lasty < res.x && GET_X(stack[i]) < res.x && GET_Y(stack[i]) < res.x) // lasty < res.x --> make sure current site is linking to previous site
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum)) // when previous site is not in another thread
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(stack[lasty%bandNum]), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (ix == 280 && iz == 280 && iy*bandNum+i==116)
{
printf("2-a=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(stack[lasty%bandNum]), lasty, GET_Y(stack[lasty%bandNum]), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
//y_inter[i] = interpointY(ix, lasty, GET_STACK(mask), ix, iy*bandNum+i, GET_STACK(stack[i]), ix, iz) ;
y_inter[i] = interpointY(GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix) ;
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-b=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
else
{
y_inter[i] = y1;
}
y1 = y_inter[i];
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-d=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
else
{
y_inter[i] = y1;
/*if (iz == 280 && ix == 280 && iy*bandNum+i==116)
{
printf("2-c=! %d %d %d %d %d %d %d %d %d %d %f\n", i, iy , GET_X(mask), lasty, GET_Y(mask), GET_X(stack[i]), iy*bandNum+i, GET_Y(stack[i]), iz, ix, y_inter[i]);
}*/
}
}
/*if (ix == 256 && iz == 0 )
{
printf("=! %d %d %d %d %d %d %d %d %f \n", i, iy , iy%32, lasty, GET_STACK(stack[lasty%bandNum]),iy*bandNum+i,GET_STACK(stack[i]), mask, y_inter[i]);
}*/
/*if (iz == 111 && ix == 250 )
{
printf("bitresult ! %d %d %d \n", iy , iy%32, bitresult);
}*/
//------------------------------------------------------
// Start warp-divergent elimination loop: repeatedly remove dominated sites until no lane in the warp reports a change
//bitresult = 0; // clear in every thread for storing result later
loopflag = true;
int test = 0;
while (loopflag)
{
loopflag = false;
mask = 0;
count = 0;
y2 = 0.0;
test++;
for(i=0; i < bandNum ; i++)
{
if (count > 0) break;
//lasty = GET_PTR(stack[i]);
lasty = GET_Z(stack[i]);
y1 = __shfl(y_inter[bandNum-1], (lasty%(warpSize*bandNum))/(bandNum));
/*if (ix == 256 && iz == 500 && iy==81 )
{
printf("4=! %d %d %d %d %d %f\n", i, iy , iy%32, lasty, bitresult, y1);
}*/
//if (iz == 250 && ix == 431 && (iy*bandNum+i) == 96)
//{
// printf("1=! %d %d %d %d %d %d %f %f\n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
//}
if (GetBitPos((iy*bandNum+i)%32, bitresult))
{
if (lasty < res.x )
{
if ((lasty%(warpSize*bandNum))/(bandNum) == ((iy*bandNum+i)%(warpSize*bandNum))/(bandNum))
{
y1 = y_inter[lasty%bandNum];
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 280 && ix == 280 && (iy*bandNum+i)>=100 && (iy*bandNum+i)< 125)
{
printf("2-1=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
/*if (iz == 111 && ix == 250 && (j+i) < 64 && (j+i)>=32)
{
printf("test test? %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
else
{
y2 = y_inter[i];
if (y1 >= y2)
{
count++;
if (count == 1) mask = ENCODE_STACK(lasty, iy*bandNum+i);
else mask = 0;
/*if (iz == 280 && ix == 280 && (iy*bandNum+i)>=100 && (iy*bandNum+i)< 125)
{
printf("2-2=! %d %d %d %d %d %d %f %f \n", i, iy , iy%32, lasty, bitresult, count, y1, y2);
}*/
}
else
{
if (count == 0) mask = 0;
}
}
}
else
{
if (count == 0) mask = 0;
}
}
else
{
if (count == 0) mask = 0;
}
}
lasty = mask;
mask = __ballot(count > 0);
//if (ix == 256 && iz == 500 && iy == 80)
//{
// printf("3=! %d %d \n", mask, count);
//}
// update the stack
if (mask > 0)
{
loopflag = true;
k = __ffs(mask);
lasty = __shfl(lasty, k-1);
i = GET_STACK(lasty);
j = GET_PTR(lasty);
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d %d %d\n", k, iy , iy%32, lasty, bitresult, i, j);
}*/
lasty = __shfl((int)stack[i%bandNum], (i%(warpSize*bandNum))/(bandNum));
k = GET_Z(lasty);
lasty = __shfl((int)stack[k%bandNum], (k%(warpSize*bandNum))/(bandNum));
if (iy == j/bandNum)
{
//stack[j%bandNum] = ENCODE_PTR(stack[j%bandNum], k);
stack[j%bandNum] = ENCODE_Z(stack[j%bandNum], k);
//y_inter[j%bandNum] = interpointY(ix, k, GET_STACK(lasty), ix, j, GET_STACK(stack[j%bandNum]), ix, iz) ;
y_inter[j%bandNum] = interpointY(GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix) ;
/*if (iz == 111 && ix == 250 && (iy == 48))
{
printf("tset %f %d \n %d %d %d %d %d %d %d %d\n", y_inter[j%bandNum], j, GET_X(lasty), k, GET_Y(lasty), GET_X(stack[j%bandNum]), j, GET_Y(stack[j%bandNum]), iz, ix);
}*/
for(count=j%bandNum+1; count < bandNum ; count++)
{
if (!GetBitPos((iy*bandNum+count)%32, bitresult))
y_inter[count] = y_inter[j%bandNum];
else break;
/*if (ix == 254 && iz == 500 )
{
printf("4=! %d %d %d %d %d\n", j, iy , iy%32, count, GET_PTR(stack[count]));
}*/
}
}
if (iy == i/bandNum)
{
bitresult = bitresult & ~(SetBitPos(i%32));
y_inter[i%bandNum] = y_inter[(max(0,i-1))%bandNum];
}
mask = __shfl((int)bitresult, (i%(warpSize*bandNum))/(bandNum));
if (i%(warpSize*bandNum)/warpSize == iy%(warpSize)/(warpWidth))
{
bitresult = mask;
}
}
else break;
}
if (iy%warpWidth == 0)
{
bitDeleted[(iy/warpWidth)*res.x*res.z+ix*res.x+iz] = ~bitresult;
}
tid += chunksize;
}
}
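//------------------------------------------------------
// Maurer-axis elimination step (summary inferred from the code below).
// For every slice of a column this kernel selects, per row of the BANDWIDTH band,
// the site whose interval (from middlepointY) covers the current slice index, links
// the selected sites into a stack with back-pointers, and then applies the classic
// stack test: a middle site is discarded whenever the two bisector intersections
// returned by interpointY cross (y1 >= y2). Removed or empty rows are recorded as
// set bits in bitDeleted.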
__global__ void LDNIDistanceField__MaurerAxisInX(unsigned int *bitDeleted, unsigned int *sites, unsigned int *sites_index, int3 res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int ind[BANDWIDTH], current_id[BANDWIDTH], next_id[BANDWIDTH];
short num[BANDWIDTH], i, j, count;
unsigned int st[BANDWIDTH];
unsigned int stack[BANDWIDTH];
unsigned int bitresult;
float y1, y2;
short ptr[BANDWIDTH];
int middle_id[BANDWIDTH], k, temp;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
bitresult = 0;
if (iz == 0)
{
j = iy*BANDWIDTH;
for(i=0; i < BANDWIDTH; i++)
{
st[i] = sites_index[(j+i)*res.x+ix];
num[i] = sites_index[(j+i)*res.x+ix+1]-st[i];
/*if (GET_STACK(sites[st[i]]) == 330 && GET_PTR(sites[st[i]]) == 291)
{
printf(" ^^ %d %d %d %d %d \n", ix, i, iz, GET_STACK(sites[st[i]]), GET_PTR(sites[st[i]]));
}*/
//if (iy == 1 && iz == 0 && ix == 1)
//{
// printf("how ? %d %d %d %d %d %d %d \n",num[i], GET_STACK(sites[st[i]]), GET_PTR(sites[st[i]]), GET_STACK(sites[st[i]+1]), GET_PTR(sites[st[i]+1]), GET_STACK(sites[st[i]+2]), GET_PTR(sites[st[i]+2]) );
//}
if (num[i] > 1)
{
current_id[i] = sites[st[i]];
next_id[i] = sites[st[i]+1];
ind[i] = 2;
middle_id[i] = middlepointY(current_id[i], next_id[i], ix);
//middle_id[i] = (short)ceil((current_id[i]+next_id[i])/2.0);
}
else if (num[i] == 1)
{
current_id[i] = sites[st[i]];
ind[i] = 1;
middle_id[i] = LDNIMARKER;
}
else
{
middle_id[i] = -1;
current_id[i] = LDNIMARKER;
next_id[i]= LDNIMARKER;
ind[i] = 0;
}
//if (iz == 25 && ix == 250)
// printf("num %d %d %d %d %d %d\n", num[i], current_id[i], next_id[i], j+i, middle_id[i]);
}
}
count = 0;
k = -1;
temp = 0;
for(i=0; i < BANDWIDTH ; i++)
{
if (num[i]>0)
{
ptr[i] = k;
if ((int)iz < middle_id[i])
stack[i] = current_id[i];
else
{
if (ind[i] < num[i])
{
k = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], k, ix);
while (temp <= middle_id[i])
{
next_id[i] = k;
k = sites[st[i]+ind[i]];
ind[i]++;
temp = middlepointY(next_id[i], k, ix);
}
middle_id[i] = temp;
current_id[i] = next_id[i];
next_id[i] = k;
}
else
{
middle_id[i] = LDNIMARKER;
current_id[i] = next_id[i];
}
stack[i] = current_id[i];
//if ( ix == 250 && iy == 2 && i == 14)
//{
// printf("stack %d %d %d %d %d %d\n", k, ind[i], iz, num[i], middle_id[i], temp);
//// printf("test~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n");
//}
}
count++;
/*if ( ix == 250 && iy == 2 && i == 14)
{
printf("stack %d %d %d %d %d \n", iz, middle_id[i], GET_STACK(current_id[i]), GET_PTR(current_id[i]), ind[i]);
}*/
//if (iy == 13 && i == 15 && ix == 250 )
//if ( ix == 250 && iy == 2 && i == 1 && iz == 0)
//if (iy == 0 && ix == 250 && i == 25)
/*if (ix == 250 && iy == 2 && i == 14 && iz == 111)
{
printf("stack %d %d %d %d %d %d\n", i, iz, num[i], GET_STACK(stack[i]), GET_PTR(stack[i]));
for(int test = 0; test < num[i] ; test++)
printf("%d %d %d %d %d %d\n", num[i], middlepointY(sites[st[i]]+test, sites[st[i]+test+1], ix), test, iy*BANDWIDTH+i, GET_STACK(sites[st[i]+test]), GET_PTR(sites[st[i]+test]));
}*/
k = i;
/*if ( ix == 250 && iy == 3)
{
printf("stack %d %d %d %d %d %d\n", k, ind[i], iz, num[i], middle_id[i], temp);
}*/
}
else
{
stack[i] = -1;
ptr[i] = k;
bitresult = bitresult | SetBitPos(i);
}
//printf("test test? %d %d %d %d %d %d %d \n", ptr[i], i, GET_STACK(current_id[i]),GET_PTR(current_id[i]), middle_id[i] , GET_STACK(next_id[i]),GET_PTR(next_id[i]));
}
if (count > 2)
{
k=0;
for(i=0; i < BANDWIDTH ; i++)
{
//if (iy == 0 && iz ==0 && ix == 0)
// printf("test test %d %d \n", count, stack[i] );
if (GET_PTR(stack[i]) < res.x || GET_STACK(stack[i]) < res.x)
{
if (k < 2)
{
k++;
continue;
}
while (k>=2)
{
//y1 = interpointY(ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz) ;
//y2 = interpointY(ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz) ;
//y1 = interpointY(GET_PTR(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_STACK(stack[ptr[ptr[i]]]), GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), ix, iz) ;
//y2 = interpointY(GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_PTR(stack[i]), j+i, GET_STACK(stack[i]), ix, iz) ;
y1 = interpointY(GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), iz, ix) ;
y2 = interpointY(GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), iz, ix) ;
//if (ix == 256 && (j+i) == 178 && iz == 0)
//{
// printf("ptr %d %f %f %d %d %d\n", j+i, y1, y2, k, j, i);
// printf("y1 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[ptr[i]], stack[ptr[ptr[i]]], ix, j+ptr[i], stack[ptr[i]], ix, iz);
// printf("y2 : %d %d %d %d %d %d %d %d \n", ix, j+ptr[i], stack[ptr[i]], ix, j+i, stack[i], ix, iz);
//}
/*if (iy == 1 && iz == 0 && ix == 1)
{
printf("test test? %d %d %f %f %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]) );
printf("y1 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz) ;
printf("y2 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz) ;
}*/
/*if (ix == 250 && j+i == 78 && iz == 111)
{
printf("test test? %d %d %f %f %d %d %d \n", ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]) );
printf("y1 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz) ;
printf("y2 : %d %d %d %d %d %d %d %d \n", GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz) ;
}*/
//if ((j+i) >= 420 && (j+i) <= 440 && iz == 111 && ix == 250)
if (y1 < y2)
break;
//if ((j+i) == 430 && iz == 111 && ix == 250)
/*if (iz == 280 && ix == 280 && (j+i) < 128 && (j+i)>=96)
{
printf("test test? %d %d %d %f %f %d %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", bitresult, ptr[i], i, y1, y2, GET_PTR(stack[ptr[i]]), j+ptr[i], GET_STACK(stack[ptr[i]]), GET_STACK(stack[ptr[ptr[i]]]), j+ptr[ptr[i]], GET_PTR(stack[ptr[ptr[i]]]), GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), ix, iz, GET_STACK(stack[ptr[i]]), j+ptr[i], GET_PTR(stack[ptr[i]]), GET_STACK(stack[i]), j+i, GET_PTR(stack[i]), ix, iz);
}*/
k--;
stack[ptr[i]] = -1;
bitresult = bitresult | SetBitPos(ptr[i]);
ptr[i] = ptr[ptr[i]];
}
k++;
}
}
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d \n", iy, bitresult, count );
//for(i=0; i < BANDWIDTH ; i++)
//{
// bitDeleted[iy*res*res+ix*res+iz]
//}
}
else
{
bitDeleted[iy*res.x*res.z+ix*res.x+iz] = bitresult;
//if (ix == 256 && iz ==0)
// printf("--------------%d %d %d\n", iy, bitresult, count );
}
tid += blockDim.x * gridDim.x;
}
}
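//------------------------------------------------------
// Flags candidate positions along Y: for each voxel it takes the nearest stored
// site of rows iy-1, iy and iy+1, and sets the voxel's bit (packed 32 slices per
// word) whenever the middle row is dominated by its two neighbours, i.e. the two
// interpointY values satisfy y1 >= y2. The number of flagged voxels per column is
// accumulated atomically in *counter.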
__global__ void LDNIDistanceField__GenerateProbablySiteInY(unsigned int *bitDeleted, unsigned int *bitForNextLoop, unsigned int *counter, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int chunksize = blockDim.x * gridDim.x;
short current_id[3], ind[3];
short prev_id[3];
unsigned int st[3], num[3], bitResult;
float y1, y2;
short z[3];
int count=0;
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
return;
}
if (iy > 0 && iy < res-1)
{
if (iz == 0)
{
st[1] = sites_index[iy*res+ix];
num[1] = sites_index[iy*res+ix+1]-st[1];
st[0] = sites_index[(iy-1)*res+ix];
num[0] = sites_index[(iy-1)*res+ix+1]-st[0];
st[2] = sites_index[(iy+1)*res+ix];
num[2] = sites_index[(iy+1)*res+ix+1]-st[2];
if (num[0]>0) current_id[0] = sites[st[0]];
if (num[1]>0) current_id[1] = sites[st[1]];
if (num[2]>0) current_id[2] = sites[st[2]];
prev_id[0] = LDNIMARKER; //iy-1
prev_id[1] = LDNIMARKER; //iy
prev_id[2] = LDNIMARKER; //iy+1
ind[0] = 0;
ind[1] = 0;
ind[2] = 0;
bitResult = 0;
count = 0;
//if (ix == 125 && iy == 251)
//if (ix == 125 && (iy <=252 && iy>=200))
//{
//printf("%d %d %d \n",num[0], num[1], num[2] );
/*for(int i=0; i<num[0]; i++)
{
printf("sites 0 : %d \n",sites[st[0]+i]);
}*/
//for(int i=0; i<num[1]; i++)
//{
// printf("sites 1 : %d %d %d \n",ix, iy, sites[st[1]+i]);
//}
//printf("------------- \n");
/*for(int i=0; i<num[2]; i++)
{
printf("sites 2 : %d \n",sites[st[2]+i]);
}*/
//}
}
if (num[0] > 0 && num[1] > 0 && num[2] > 0)
{
if (iz != current_id[1])
{
z[0] = (abs((int)(prev_id[0]-iz)) < abs((int)(current_id[0]-iz)))? prev_id[0]:current_id[0];
z[1] = (abs((int)(prev_id[1]-iz)) < abs((int)(current_id[1]-iz)))? prev_id[1]:current_id[1];
z[2] = (abs((int)(prev_id[2]-iz)) < abs((int)(current_id[2]-iz)))? prev_id[2]:current_id[2];
y1 = interpointY(ix, iy-1, z[0], ix, iy, z[1], ix, iz) ;
y2 = interpointY(ix, iy, z[1], ix, iy+1, z[2], ix, iz) ;
/*if (ix == 125 && iy == 251 && iz == 211)
{
printf("%d %d %d %d %f %f %d\n", iz, z[0], z[1], z[2], y1, y2, count);
printf("a) %d %d %d %d %d %d %d %d \n",ix, iy-1, z[0], ix, iy, z[1], ix, iz);
printf("b) %d %d %d %d %d %d %d %d \n",ix, iy, z[1], ix, iy+1, z[2], ix, iz);
printf(" %d %d %d %d %d %d \n", prev_id[0], prev_id[1], prev_id[2], current_id[0], current_id[1], current_id[2]);
}*/
if (y1 >= y2)
{
bitResult = bitResult | SetBitPos(iz%32);
count++;
}
}
else
{
prev_id[1] = current_id[1];
ind[1]++;
if (ind[1] >= num[1])
current_id[1] = LDNIMARKER;
else
current_id[1] = sites[st[1]+ind[1]];
}
if (iz == current_id[0])
{
//if (ix == 125 && iy == 256)
// printf("--------------\n");
prev_id[0] = current_id[0];
ind[0]++;
if (ind[0] >= num[0])
current_id[0] = LDNIMARKER;
else
current_id[0] = sites[st[0]+ind[0]];
}
if (iz == current_id[2])
{
prev_id[2] = current_id[2];
ind[2]++;
if (ind[2] >= num[2])
current_id[2] = LDNIMARKER;
else
current_id[2] = sites[st[2]+ind[2]];
}
if ((iz+1)%32 == 0)
{
bitForNextLoop[(iz/32)*res*res+iy*res+ix]= bitResult;
bitDeleted[(iz/32)*res*res+iy*res+ix]= bitResult;
bitResult = 0;
}
if (iz == res-1)
{
//if (iy==256)
// printf("count %d %d \n", ix, count);
atomicAdd(counter, count);
}
}
}
tid += blockDim.x * gridDim.x;
}
}
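//------------------------------------------------------
// Offsetting helper along Y: scans each ray in z, tracks the previous and current
// site, and for voxels within offsetPixel of a site it sets the current z-bit in
// the local buffer entries spanning the offset-disc chord (offsetPixel-ind to
// offsetPixel+ind). Note that in this Y variant the final scatter (atomicOr into
// bitSites) is left commented out, so only the buffer reset every 32 slices remains.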
__global__ void LDNIDistanceField__GenerateProbablySiteInYByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{
prevSite = 0;
}
//{currentSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
//if (prevSite <=0 && currentSite > 0)
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
//if(dist1 <= offsetPixel && iz <= currentSite)
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
/*else if (prevSite > 0 && currentSite <= 0)
{
dist2 = abs((int)iz-prevSite);
if(dist2 <= offsetPixel && iz >= prevSite)
//if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else if (prevSite > 0 && currentSite > 0)*/
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
//if (dist1 <= dist2 && iz <= prevSite)
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//else if (dist1 > dist2 && iz <= currentSite)
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
/*for(i=iy-offsetPixel; i<=iy+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
//if (buffer[j]!=0 && bitSites[(iz/32)*res*res+i*res+ix]!= buffer[j])
if (buffer[j]!=0)
{
atomicOr(&bitSites[(iz/32)*res*res+i*res+ix], buffer[j] );
}
}*/
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
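//------------------------------------------------------
// Same offsetting pass as the Y variant above, but the buffered chord is scattered
// along X: every 32 z-slices the non-zero buffer words are OR-ed atomically into
// bitSites for the columns ix-offsetPixel .. ix+offsetPixel.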
__global__ void LDNIDistanceField__GenerateProbablySiteInXByGivenDistance(unsigned int *bitSites, unsigned short *sites, unsigned int *sites_index, int res, int offsetPixel, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
unsigned int st = 0, num = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
unsigned int temp;
int currentSite, prevSite, dist1, dist2;
short currentIndex, ind;
float d;
short i,j;
unsigned int buffer[THREADS_PER_BLOCK] = {0};
while(tid<nodeNum) {
iy = tid%res;
iz = (tid%(chunksize*res)/res)/(chunksize/res);
ix = (tid/(chunksize*res))*(chunksize/res)+(tid%(chunksize*res)%(chunksize)/res);
if (iz*res*res+iy*res+ix > nodeNum)
{
//printf("error %d %d %d %d %d %d\n", tid, ix, iy, iz,(tid/(chunksize*res)),(tid%(chunksize*res)/(res*res)) );
return;
}
if (iz == 0)
{
st = sites_index[iy*res+ix];
num = sites_index[iy*res+ix+1]-st;
if (num > 0)
currentSite = sites[st];
prevSite = 0;
currentIndex = 0;
}
if (num > 0)
{
//if (ix ==512 && iy == 512)
// printf("tid %d %d %d %d %d %d %d %d \n", iz, num, st, prevSite, currentSite, currentIndex, sites[st], sites[st+1]);
if (iz == currentSite)
{
prevSite = currentSite;
currentIndex++;
if (currentIndex >= num)
{prevSite = 0;}
else
{currentSite = sites[st+currentIndex];}
}
if (prevSite <=0)
{
dist1 = abs((int)iz-currentSite);
if(dist1 <= offsetPixel)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
//printf("test %d %d %d %d %d\n", iz, dist1, ind, prevSite, currentSite);
}
}
else
{
dist2 = abs((int)iz-currentSite);
dist1 = abs((int)iz-prevSite);
if (dist1 <= offsetPixel || dist2 <=offsetPixel)
{
if (dist1 <= dist2)
{
d = sqrt((float)(offsetPixel*offsetPixel - dist1*dist1));
}
else
{
d = sqrt((float)(offsetPixel*offsetPixel - dist2*dist2));
}
ind = (int)d;
if (d >= 1)
{
//buffer[offsetPixel-ind] = buffer[offsetPixel-ind] | SetBitPos(iz%32);
//buffer[offsetPixel+ind] = buffer[offsetPixel+ind] | SetBitPos(iz%32);
temp = SetBitPos(iz%32);
for(i=offsetPixel-ind; i <= offsetPixel+ind; i++)
{
buffer[i] = buffer[i] | temp;
}
}
else
{
buffer[offsetPixel] = buffer[offsetPixel] | SetBitPos(iz%32);
}
//if (ix ==512 && iy == 512)
// printf("test %d %d %d %d %d %d\n", iz, dist1, dist2, ind, prevSite, currentSite);
}
}
}
if ((iz+1)%32 == 0 && num>0)
{
j=0;
//for(i=max(0,iy-offsetPixel); i<=min(res,iy+offsetPixel); j++,i++)
for(i=ix-offsetPixel; i<=ix+offsetPixel; j++,i++)
{
if (i<0 || i >= res) continue;
if (buffer[j]!=0)
{
atomicOr(&bitSites[(iz/32)*res*res+iy*res+i], buffer[j] );
}
}
for(j=0;j<offsetPixel*2+1;j++)
buffer[j]=0;
}
tid += blockDim.x * gridDim.x;
}
}
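//------------------------------------------------------
// Unpacks the 128 bits per texel of site_tex (one uint4 per (x, y, z/128) cell)
// into a compacted per-ray list: each set bit becomes the value iz*128 + bit
// position (+32/+64/+96 for the y/z/w words), written at the ray's base offset
// from table_index plus a slot reserved atomically in temp_index.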
__global__ void LDNIDistanceField__writeTexToArray(unsigned short *d_output, int res, unsigned int *table_index, unsigned int* temp_index, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
uint4 temp;
unsigned int num=0,i,st,v,a=0,ind, count = 0;
unsigned int chunksize = blockDim.x * gridDim.x;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
st = table_index[iy*res+ix];
num = table_index[iy*res+ix+1]-st;
if (num>0) {
count = bitCount(temp.x);
ind=0;
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.x);
a = 1 << v;
d_output[st+ind+i] = iz*128 + v;
temp.x = temp.x & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.y);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.y);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 32 + v;
temp.y = temp.y & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.z);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.z);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 64 + v;
temp.z = temp.z & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.w);
if (count>0)
{
ind = atomicAdd(&temp_index[iy*res+ix],count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.w);
a = 1 << v;
d_output[st+ind+i] = iz*128 + 96 + v;
temp.w = temp.w & (~a);
}
}
//if (ix == 512 && iy == 512)
// printf("what %d %d \n", d_output[st], d_output[st+1]);
}
tid += blockDim.x * gridDim.x;
}
}
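//------------------------------------------------------
// Simple stream compaction: every non-negative entry of d_input is appended to
// d_output at an index reserved with atomicAdd(counter, 1); output order is
// therefore non-deterministic.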
__global__ void PBADistanceField__writeCompactArray(int *d_output, int *d_input, unsigned int *counter, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int i;
while(tid<nodeNum) {
if (d_input[tid]> -1)
{
i = atomicAdd(counter, 1);
d_output[i] = d_input[tid];
//if (i == 307076)
// printf("$$$$$ %d %d %d %d %d \n", i, d_input[90000000], GET_X(d_input[90000000]), GET_Y(d_input[90000000]), GET_Z(d_input[90000000]) );
}
tid += blockDim.x * gridDim.x;
}
}
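//------------------------------------------------------
// Converts the packed site texture into the dense PBA input volume: voxels whose
// bit is set receive their own encoded (x, y, z) coordinate, all other voxels are
// set to -1, and the total number of sites is accumulated in *counter.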
__global__ void PBADistanceField__writeTexToArray(int *d_output, int res, int nodeNum, unsigned int* counter)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int ix,iy,iz;
uint4 temp;
unsigned int i,id, count;
unsigned int chunksize = blockDim.x * gridDim.x;
int marker = -1;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
id=0;
count = 0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+i, res);
if (GetBitPos(i,temp.x))
{
d_output[id] = ENCODE(ix, iy, iz*128+i);
//if (ix == 125 && iy == 250)
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+32+i, res);
if (GetBitPos(i,temp.y))
{
d_output[id] = ENCODE(ix, iy, iz*128+32+i);
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+64+i, res);
if (GetBitPos(i,temp.z))
{
d_output[id] = ENCODE(ix, iy, iz*128+64+i);
count++;
}
else
{
d_output[id] = -1;
}
}
id=0;
for(i=0; i < 32; i++)
{
id = TOID(ix, iy, iz*128+96+i, res);
if (GetBitPos(i,temp.w))
{
d_output[id] = ENCODE(ix, iy, iz*128+96+i);
count++;
}
else
{
d_output[id] = -1;
}
}
atomicAdd(counter, count);
tid += chunksize;
}
}
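//------------------------------------------------------
// Sorts the depth samples of each ray in place with a simple O(n^2) exchange
// sort; rays with more than 512 samples exceed the local buffer, print a warning
// and make the thread return early.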
__global__ void LDNIDistanceField__Sort2DArray(unsigned short *d_output, unsigned int *d_index, int res, int nodeNum)
{
unsigned int tid=threadIdx.x+blockIdx.x*blockDim.x;
unsigned int st,num,i,j;
unsigned short tempdepth;
unsigned short depth[512];
while(tid<nodeNum) {
st = d_index[tid];
num = d_index[tid+1]-st;
if (num > 0)
{
if (num > 512) { printf("too many num on one thread!!! %d\n", num); return;};
for(i=0;i<num;i++) depth[i]=d_output[st+i];
for(i=0;i<num;i++) {
for(j=i+1;j<num;j++) {
if (depth[i]>depth[j]) {
printf("sort need ? %d %d \n", depth[i], depth[j]);
tempdepth=depth[i]; depth[i]=depth[j]; depth[j]=tempdepth;
}
}
}
for(i=0;i<num;i++) d_output[st+i]=depth[i];
}
tid += blockDim.x * gridDim.x;
}
}
__global__ void LDNIDistanceField__Test(float3 *d_output, int res, unsigned int *counter, ushort2 *site, unsigned int* site_index, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
//ushort3 temp;
ushort2 temp;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res);
st = site_index[iy*res+ix];
num = site_index[iy*res+ix+1]-st;
if (num > 0)
{
ind = atomicAdd(counter,num);
for(i=0; i < num; i++)
{
temp = site[st+i];
//d_output[ind+i] = make_float3(origin.x+(ww*(temp.x))*width, origin.x+(gw*temp.y), origin.y+(gw*temp.z));
d_output[ind+i] = make_float3(origin.x+(gw*iy), origin.y+(gw*temp.x), origin.z+(gw*temp.y));
}
}
//if (count>0) {
// ind = atomicAdd(counter,count);
// for(i=0; i < count ; i++){
// v = GetFirstBitPos(temp);
// a = 1 << v;
// //d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
// d_output[ind+i] = make_float3(origin.x+(ww*(iz*32+v))*width, origin.x+(gw*ix), origin.y+(gw*iy));
// temp = temp & (~a);
// }
//}
tid += blockDim.x * gridDim.x;
}
}
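//------------------------------------------------------
// Expands a bit-packed 3D site array into explicit float3 points for rendering:
// each set bit of d_input becomes one vertex appended to the VBO at an atomically
// reserved index.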
__global__ void LDNIDistanceField__writeSitesToVBO(float3 *d_output, int res, unsigned int *counter, unsigned int* d_input, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = d_input[iz*res*res+iy*res+ix];
count = bitCount(temp);
if (count>0) {
ind = atomicAdd(counter,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp);
a = 1 << v;
//d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
d_output[ind+i] = make_float3(origin.x+(ww*(iz*32+v))*width, origin.x+(gw*ix), origin.y+(gw*iy));
temp = temp & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
}
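//------------------------------------------------------
// Walks each ray (indexed by ix, iy) through its sorted site list, using
// middlepointX to track which site is nearest at the current slice, and appends to
// the VBO every voxel whose truncated Euclidean distance to that site equals
// offdist, i.e. it extracts an iso-offset shell from the LDNI distance field.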
__global__ void LDNIDistanceField__writeResultToVBO(float3 *d_output, int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num, ind;
unsigned int current_id, next_id;
unsigned int chunksize = blockDim.x * gridDim.x;
int middle_id, k, temp;
double dist = 0.0;
int dx, dy, dz, id;
float ww = 1.0/float(res.x);
float gw = width/float(res.x);
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
st = sites_index[iy*res.x+ix];
num = sites_index[iy*res.x+ix+1]-st;
if (num > 1)
{
current_id = sites[st];
next_id = sites[st+1];
ind = 2;
middle_id = middlepointX(current_id, next_id, ix , iy);
}
else if (num == 1)
{
current_id = sites[st];
ind = 1;
middle_id = LDNIMARKER;
}
else
{
middle_id = -1;
current_id = LDNIMARKER;
next_id= LDNIMARKER;
ind = 0;
}
}
if (num > 0)
{
if (iz < middle_id)
{
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*iz), origin.y+(gw*ix), origin.z+(gw*iy));
}
}
else
{
if (ind < num)
{
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
while (temp <= middle_id || iz >= temp)
{
next_id = k;
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
}
middle_id = temp;
current_id = next_id;
next_id = k;
}
else
{
middle_id = LDNIMARKER;
current_id = next_id;
}
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*iz), origin.y+(gw*ix), origin.z+(gw*iy));
}
}
//if (ix == 256 && iy == 311)
// printf("current %d %d %d %d %d \n", iz, middle_id, GET_X(current_id), GET_Y(current_id), GET_Z(current_id));
}
tid += chunksize;
}
}
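//------------------------------------------------------
// Counting pass that mirrors LDNIDistanceField__writeResultToVBO above: it performs
// the same nearest-site traversal but only increments *counter, presumably so the
// caller can size the vertex buffer before the write pass.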
__global__ void LDNIDistanceField__countArrayToVBO(int3 res, unsigned int* counter, unsigned int *sites, unsigned int *sites_index, int offdist, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int st, num, ind;
unsigned int current_id, next_id;
unsigned int chunksize = blockDim.x * gridDim.x;
int middle_id, k, temp;
double dist = 0.0;
int dx, dy, dz;
while(tid<nodeNum) {
iy = (tid%res.z)%res.y;
iz = (tid%(chunksize*res.z))/chunksize;
ix = (tid/(chunksize*res.z))*(chunksize/res.y)+(tid%(chunksize*res.z)%(chunksize)/res.y);
if (iz == 0)
{
st = sites_index[iy*res.x+ix];
num = sites_index[iy*res.x+ix+1]-st;
if (num > 1)
{
current_id = sites[st];
next_id = sites[st+1];
ind = 2;
middle_id = middlepointX(current_id, next_id, ix , iy);
}
else if (num == 1)
{
current_id = sites[st];
ind = 1;
middle_id = LDNIMARKER;
}
else
{
middle_id = -1;
current_id = LDNIMARKER;
next_id= LDNIMARKER;
ind = 0;
}
}
if (num > 0)
{
if (iz < middle_id)
{
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
}
else
{
if (ind < num)
{
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
while (temp <= middle_id || iz >= temp)
{
next_id = k;
k = sites[st+ind];
ind++;
temp = middlepointX(next_id, k, ix , iy);
}
middle_id = temp;
current_id = next_id;
next_id = k;
}
else
{
middle_id = LDNIMARKER;
current_id = next_id;
}
dx = GET_X(current_id)-iz; dy = GET_Y(current_id)-ix; dz = GET_Z(current_id)-iy;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
}
}
tid += chunksize;
}
}
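//------------------------------------------------------
// Counts the voxels of the PBA output whose distance to the recorded nearest site
// truncates to offdist; the write variant below emits the corresponding points.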
__global__ void PBADistanceField__countArrayToVBO(int res, unsigned int* counter, int *outputDF, int offdist, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
int dx, dy, dz;
int nx, ny, nz;
int id;
double dist = 0.0;
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
// if (ix == 125 && iy == 250)
// printf("dist 0--------------%f %d %d %d \n", dist, ix, iy, iz);
DECODE(outputDF[tid], nx, ny, nz);
//if (ix == 0 && iy == 245 && iz == 231)
// printf("dist 0--------------%d %d %d %d %d %d %d \n", outputDF[tid], nx, ny , nz , ix, iy, iz);
dx = nx - ix; dy = ny - iy; dz = nz - iz;
dist = dx * dx + dy * dy + dz * dz;
dist = sqrt(dist);
if ((int)dist == offdist )
{
atomicAdd(counter, 1);
}
tid += blockDim.x * gridDim.x;
}
}
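//------------------------------------------------------
// Emits a float3 vertex for every voxel whose nearest-site distance rounds down to
// offdist; output slots are reserved with atomicAdd on *counter.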
__global__ void PBADistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* counter, int *outputDF, int offdist, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
int dx, dy, dz;
int nx, ny, nz;
int id;
double dist, dist2;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
DECODE(outputDF[tid], nx, ny, nz);
dx = nx - ix; dy = ny - iy; dz = nz - iz;
dist = dx * dx + dy * dy + dz * dz;
dist2 = sqrt(dist);
if (floor(dist2) == offdist )
//if (dist >= offdist && dist <= offdist+1)
{
id = atomicAdd(counter, 1);
d_output[id] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(gw*iz));
}
tid += blockDim.x * gridDim.x;
}
}
__global__ void LDNIDistanceField__writeArrayToVBO(float3 *d_output, int res, unsigned int* table_index, unsigned int *m_3dArray, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
unsigned int temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = m_3dArray[tid];
count = bitCount(temp);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
temp = temp & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
}
__global__ void LDNIDistanceField__writeTexToVBO(float3 *d_output, int res, int* table_index, float width, float3 origin, int nodeNum)
{
int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
uint4 temp;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
temp = tex3D(site_tex,ix,iy,iz);
count = bitCount(temp.x);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.x);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+v))*width);
temp.x = temp.x & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.y);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.y);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+32+v))*width);
temp.y = temp.y & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.z);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.z);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+64+v))*width);
temp.z = temp.z & (~a);
}
}
a=0; v=0; count=0;
count = bitCount(temp.w);
if (count>0){
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(temp.w);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*128+96+v))*width);
temp.w = temp.w & (~a);
}
}
tid += blockDim.x * gridDim.x;
}
/*int tid=threadIdx.x+blockIdx.x*blockDim.x;
int ix,iy,iz;
//uint4 temp;
unsigned int value;
unsigned int count=0,i,ind,v,a=0;
float ww = 1.0/float(res);
float gw = width/float(res);
while(tid<nodeNum) {
ix=tid%res; iy=(tid/res)%res; iz=(tid/(res*res));
//temp = tex3D(uint_tex3D,ix,iy,iz);
value = tex1D(site_tex, tid);
count = bitCount(value);
if (count>0) {
ind = atomicAdd(table_index,count);
for(i=0; i < count ; i++){
v = GetFirstBitPos(value);
a = 1 << v;
d_output[ind+i] = make_float3(origin.x+(gw*ix), origin.y+(gw*iy), origin.z+(ww*(iz*32+v))*width);
value = value & (~a);
}
}
tid += blockDim.x * gridDim.x;
}*/
}
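//------------------------------------------------------
// PBA phase 1, inter-band propagation: for the first and last slice of each Z band,
// look into the neighbouring bands (backward from the top slice, forward from the
// bottom slice) and overwrite the stored site when a strictly closer one is found.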
__global__ void PBADistanceField_kernelPropagateInterband(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int inc = bandSize * size * size;
int nz, nid, nDist, myDist;
int pixel;
// Top row, look backward
int tz = __mul24(band, bandSize);
int topId = TOID(tx, ty, tz, size);
int bottomId = TOID(tx, ty, tz + bandSize - 1, size);
pixel = tex1Dfetch(pbaTexColor, topId);
nz = GET_Z(pixel);
myDist = abs(nz - tz);
for (nid = bottomId - inc; nid >= 0; nid -= inc) {
pixel = tex1Dfetch(pbaTexColor, nid);
if (pixel != PBAMARKER) {
nz = pixel & 0x3ff;
nDist = abs(nz - tz);
if (nDist < myDist)
output[topId] = pixel;
break;
}
}
// Last row, look downward
tz = tz + bandSize - 1;
pixel = tex1Dfetch(pbaTexColor, bottomId);
nz = GET_Z(pixel);
myDist = abs(nz - tz);
for (int ii = tz + 1, nid = topId + inc; ii < size; ii += bandSize, nid += inc) {
pixel = tex1Dfetch(pbaTexColor, nid);
if (pixel != PBAMARKER) {
nz = pixel & 0x3ff;
nDist = abs(nz - tz);
if (nDist < myDist)
output[bottomId] = pixel;
break;
}
}
}
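//------------------------------------------------------
// PBA phase 1, in-band flood along Z: a downward sweep carries the last site seen
// into every voxel of the band, then an upward sweep keeps whichever of the two
// candidates is closer in Z.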
__global__ void PBADistanceField_kernelFloodZ(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int tz = band * bandSize;
int plane = size * size;
int id = TOID(tx, ty, tz, size);
int pixel1, pixel2;
pixel1 = PBAMARKER;
// Sweep down
for (int i = 0; i < bandSize; i++, id += plane) {
pixel2 = tex1Dfetch(pbaTexColor, id);
//if (tx == 256 && ty == 132 && tz == 0) printf("1 %d %d %d %d\n", pixel2, tx, ty, tz);
if (pixel2 != PBAMARKER)
pixel1 = pixel2;
output[id] = pixel1;
//if (id == 67840) printf("1 %d %d %d %d\n", pixel1, tx, ty, tz);
}
int dist1, dist2, nz;
id -= plane + plane;
// Sweep up
for (int i = bandSize - 2; i >= 0; i--, id -= plane) {
//if (id == 67840) printf("2 %d \n", pixel1);
nz = GET_Z(pixel1);
//if (id == 67840) printf("3 %d \n", nz);
dist1 = abs(nz - (tz + i));
//if (id == 67840) printf("4 %d \n", dist1);
pixel2 = output[id];
//if (id == 67840) printf("5 %d \n", pixel2);
nz = GET_Z(pixel2);
//if (id == 67840) printf("6 %d \n", nz);
dist2 = abs(nz - (tz + i));
//if (id == 67840) printf("7 %d %d %d\n", dist2, dist1, pixel1);
if (dist2 < dist1)
pixel1 = pixel2;
output[id] = pixel1;
//if (id == 67840) printf("8 %d \n", pixel1);
}
}
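//------------------------------------------------------
// PBA phase 1, band merge: each voxel compares its flooded site against the
// propagated sites stored at the band's top and bottom slices and keeps the closest
// of the three.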
__global__ void PBADistanceField_kernelUpdateVertical(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
int tz = band * bandSize;
int id = TOID(tx, ty, tz, size);
int plane = size * size;
int top = tex1Dfetch(pbaTexLinks, id);
int bottom = tex1Dfetch(pbaTexLinks, TOID(tx, ty, tz + bandSize - 1, size));
int topZ = GET_Z(top);
int bottomZ = GET_Z(bottom);
int pixel;
int dist, myDist, nz;
for (int i = 0; i < bandSize; i++, id += plane) {
pixel = tex1Dfetch(pbaTexColor, id);
nz = GET_Z(pixel);
myDist = abs(nz - (tz + i));
dist = abs(topZ - (tz + i));
if (dist < myDist) { myDist = dist; pixel = top; }
dist = abs(bottomZ - (tz + i));
if (dist < myDist) pixel = bottom;
output[id] = pixel;
}
}
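//------------------------------------------------------
// PBA phase 2, per-band stack construction along Y (Maurer-style): sites are pushed
// onto a linked stack stored in `stack`, and a candidate is popped whenever the two
// perpendicular-bisector intersections returned by interpointY cross (i1 >= i2).
// The last entry of a band also records the band-boundary link used by the merge step.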
__global__ void PBADistanceField_kernelMaurerAxis(int *stack, int size, int mod, int bandSize, int test)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = band * bandSize;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int lastY = INFINITY;
int stackX_1, stackY_1 = INFINITY, stackZ_1, stackX_2, stackY_2 = INFINITY, stackZ_2;
int p = PBAMARKER, nx, ny, nz, s1, s2;
float i1, i2;
for (int i = 0; i < bandSize; i++, ty++) {
p = tex1Dfetch(pbaTexColor, TOID(tx, ty, tz, size));
//if (tx == 1 && (ty < 64 && ty >= 32) && tz == 200 && test == 1)
//if (tz == 250 && ty == 33 && tx <= 512 && test == 1)
//if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
//if (tz == 250 && test == 1 && ty == 25)
//if (tz == 250 && ty == 65 && test == 1)
//if (tz == 250 && tx == 62 && test == 0)
//{
// DECODE(p, nx, ny, nz);
//if (ny == 330 && nz == 291 && test == 1)
// printf("ptr %d %d %d %d %d %d %d\n", tx, ty, tz , nx, ny, nz , i);
//}
//if (tx == 256 && tz == 0 && ty == 132)
//{
// printf("ptr %d %d %d\n", ty, p, TOID(tx, ty, tz, size ));
//printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
//printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
//}
if (p != PBAMARKER) {
while (stackY_2 != INFINITY) {
DECODE(s1, stackX_1, stackY_1, stackZ_1);
DECODE(s2, stackX_2, stackY_2, stackZ_2);
i1 = interpointY(stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz);
DECODE(p, nx, ny, nz);
i2 = interpointY(stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz);
/*if (tx == 256 && tz == 0 && ty == 132)
{
printf("ptr %d %f %f %d %d\n", ty, i1, i2, i, lastY);
printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
}*/
//if (tx == 1 && (ty < 64 && ty >= 32) && tz == 0 && test == 1)
//{
//printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
//printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
//printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
//}
/*if (tz == 250 && (ty >= 416 && ty < 448) && tx == 0 && test == 1)
{
printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
printf("y1 : %d %d %d %d %d %d %d %d %d \n",stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, s1);
printf("y2 : %d %d %d %d %d %d %d %d %d \n", stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz, s2);
}*/
/*if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
{
printf("ptr %d %d %d %f %f %d %d\n", tx, ty, tz , i1, i2, i, lastY);
}*/
//if (tz == 250 && tx == 111 && (ty <= 440 && ty >= 420))
if (i1 < i2)
break;
//if (tz == 250 && ty == 33 && tx <= 512 && test == 1)
/*if (tz == 280 && tx == 280 && ty < 128 && ty >= 96 && test == 1)
{
printf("ptr %d %d %d %f %f %d %d \n y1 : %d %d %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d \n", tx, ty, tz , i1, i2, i, lastY, stackX_1, stackY_2, stackZ_1, stackX_2, lastY, stackZ_2, tx, tz, stackX_2, lastY, stackZ_2, nx, ty, nz, tx, tz);
}*/
lastY = stackY_2; s2 = s1; stackY_2 = stackY_1;
if (stackY_2 != INFINITY)
s1 = stack[TOID(tx, stackY_2, tz, size)];
}
DECODE(p, nx, ny, nz);
s1 = s2; s2 = ENCODE(nx, lastY, nz);
stackY_2 = lastY; lastY = ty;
stack[TOID(tx, ty, tz, size)] = s2;
/*if (tx == 431 && tz == 250 && test == 0 && ty > 80 && ty < 101)
{
DECODE(s2, nx, ny, nz);
//if (ny == 330 && nz == 291 && test == 1)
printf("ptr2 %d %d %d %d %d %d %d\n", tx, ty, tz , nx, ny, nz , s2);
}*/
}
}
if (p == PBAMARKER)
stack[TOID(tx, ty-1, tz, size)] = ENCODE(INFINITY, lastY, INFINITY);
}
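//------------------------------------------------------
// PBA phase 2, pairwise band merge: the linked sites of band 2 are appended onto
// the tail of band 1's stack, dominated entries are popped with the same bisector
// test, and the backward (stack) and forward pointer arrays plus the head and tail
// markers of the merged band are relinked.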
__global__ void PBADistanceField_kernelMergeBands(int *stack, int *forward, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band1 = (blockIdx.x / mod) * 2;
int band2 = band1 + 1;
int tx = blkX * blockDim.x + threadIdx.x;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int firstY, lastY, next, p, id;
int3 stack_1, stack_2, current;
float i1, i2;
firstY = band2 * bandSize;
lastY = firstY - 1;
/*if ( tx == 431 && tz == 250)
{
int nx, ny, nz;
p = tex1Dfetch(pbaTexLinks, TOID(431, 97, 250, size));
DECODE(p, nx, ny, nz);
// //if (ny == 330 && nz == 291 && test == 1)
printf("ptr %d %d %d %d %d %d \n", tx, bandSize, tz , nx, ny, nz );
}*/
// Band 1: get the last two items
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
/*if ( tx == 431 && tz == 250 && bandSize == 64 && band2 == 1)
{
printf("ptr111 %d %d %d %d \n", lastY, stack_2.x, stack_2.y, stack_2.z );
}*/
if (stack_2.x == INFINITY) { // Not a site
lastY = stack_2.y;
if (lastY != INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
}
}
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
}
// Band 2, get the first item
next = tex1Dfetch(pbaTexPointer, TOID(tx, firstY, tz, size));
if (next < 0) // Not a site
firstY = -next;
if (firstY != INFINITY) {
id = TOID(tx, firstY, tz, size);
p = tex1Dfetch(pbaTexLinks, id);
DECODE(p, current.x, current.y, current.z);
}
/*if ( tx == 431 && tz == 250 && bandSize == 64 && band2 == 1)
{
printf("ptr222 %d %d %d %d %d %d %d %d %d %d %d\n", firstY, band2, stack_1.x, stack_1.y, stack_1.z, stack_2.x, stack_2.y, stack_2.z, current.x, current.y, current.z );
}*/
int top = 0;
int count = 0; // debug counter
while (top < 2 && firstY != INFINITY) {
while (stack_2.y != INFINITY) {
i1 = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
i2 = interpointY(stack_2.x, lastY, stack_2.z, current.x, firstY, current.z, tx, tz);
//if (tx == 503 && tz == 70)
// printf("-- %d %d \n", lastY, stack_2.y);
/*if (tz == 280 && tx == 280 && bandSize == 32 && lastY == 116 )// && firstY < 70 )
{
//printf("!----------- %d %d %d %f %f %d %d %d \n", stack_2.y, lastY, firstY, i1, i2, stack_1.z, stack_2.z, current.z);
printf("y1 : %d %d %d %d %d %d \n y2 : %d %d %d %d %d %d %d %d %f %f\n", stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, stack_2.x, lastY, stack_2.z, current.x, firstY, current.z, tx, tz, i1, i2);
}*/
if (i1 < i2)
break;
//if (bandSize == 128 && tz == 311 && tx == 256 )// && firstY < 70 )
//if (bandSize == 128 && tz == 250 && tx == 431)
count++;
lastY = stack_2.y; stack_2 = stack_1;
top--;
if (stack_2.y != INFINITY) {
p = stack[TOID(tx, stack_2.y, tz, size)];
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
}
}
// Update pointers to link the current node to the stack
stack[id] = ENCODE(current.x, lastY, current.z);
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
//{
//int3 test;
//DECODE(stack[TOID(tx, 97, tz, size)], test.x, test.y, test.z);
//printf("stack %d %d %d %d %d %d %d \n", bandSize, test.x, test.y, test.z, current.x, lastY, current.z );
// printf("stack %d %d %d %d %d %d %d \n", bandSize, id%size, (id/(size))%size, id/(size*size), current.x, lastY, current.z );
//}
if (lastY != INFINITY)
{
forward[TOID(tx, lastY, tz, size)] = firstY;
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
// printf("forward %d %d %d \n", bandSize, lastY, firstY );
}
top = max(1, top + 1);
// Advance the current pointer forward
stack_1 = stack_2; stack_2 = make_int3(current.x, lastY, current.z); lastY = firstY;
firstY = tex1Dfetch(pbaTexPointer, id);
if (firstY != INFINITY) {
id = TOID(tx, firstY, tz, size);
p = tex1Dfetch(pbaTexLinks, id);
DECODE(p, current.x, current.y, current.z);
}
}
//if (count >= 39)
//printf("test %d %d %d %d\n", tx, tz, count, bandSize);
// Update the head pointer
firstY = band1 * bandSize;
lastY = band2 * bandSize;
if (tex1Dfetch(pbaTexPointer, TOID(tx, firstY, tz, size)) == -INFINITY)
forward[TOID(tx, firstY, tz, size)] = -abs(tex1Dfetch(pbaTexPointer, TOID(tx, lastY, tz, size)));
// Update the tail pointer
firstY = band1 * bandSize + bandSize - 1;
lastY = band2 * bandSize + bandSize - 1;
p = tex1Dfetch(pbaTexLinks, TOID(tx, lastY, tz, size));
DECODE(p, current.x, current.y, current.z);
if (current.x == INFINITY && current.y == INFINITY) {
p = tex1Dfetch(pbaTexLinks, TOID(tx, firstY, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
if (stack_1.x == INFINITY)
current.y = stack_1.y;
else
current.y = firstY;
stack[TOID(tx, lastY, tz, size)] = ENCODE(current.x, current.y, current.z);
//if (tz == 250 && tx == 431 && bandSize == 64 && band2 == 1)
//{
// printf("-stack %d %d %d %d %d \n", bandSize, lastY, current.x, current.y, current.z );
//}
}
/* if (tz == 250 && tx == 431 && bandSize == 256)
{
int nx, ny, nz;
for(int a = 0; a < 512 ; a++)
{
DECODE(stack[TOID(tx, a, tz, size)], nx, ny, nz);
printf("%d %d %d %d \n", a, nx, ny, nz);
}
}*/
}
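//------------------------------------------------------
// Builds forward (increasing-Y) pointers from the backward links left in the stack
// texture: walking each band from its last row upward, every linked row stores the
// Y of the next linked row below it, and the band's first pixel stores the head of
// the chain as a negative value.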
__global__ void PBADistanceField_kernelCreateForwardPointers(int *output, int size, int mod, int bandSize)
{
int blkX = blockIdx.x % mod;
int band = blockIdx.x / mod;
int tx = blkX * blockDim.x + threadIdx.x;
int ty = (band+1) * bandSize - 1;
int tz = blockIdx.y * blockDim.y + threadIdx.y;
int lasty = INFINITY, nexty;
int current, id;
// Get the tail pointer
current = tex1Dfetch(pbaTexLinks, TOID(tx, ty, tz, size));
if (GET_X(current) == INFINITY)
nexty = GET_Y(current);
else
nexty = ty;
id = TOID(tx, ty, tz, size);
for (int i = 0; i < bandSize; i++, ty--, id -= size)
if (ty == nexty) {
output[id] = lasty;
nexty = GET_Y(tex1Dfetch(pbaTexLinks, id));
lasty = ty;
}
// Store the pointer to the head at the first pixel of this band
if (lasty != ty + 1)
output[id + size] = -lasty;
}
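//------------------------------------------------------
// PBA phase 3, colouring: each column is processed bottom-up in groups of
// blockDim.y rows, with the running top-of-stack pair and its bisector intersection
// kept in shared memory; stack entries are popped while the row index is not above
// the intersection, and the surviving site is written out for every voxel.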
__global__ void PBADistanceField_kernelColorAxis(int *output, int size)
{
__shared__ int3 s_Stack1[BLOCKX], s_Stack2[BLOCKX];
__shared__ int s_lastY[BLOCKX];
__shared__ float s_ii[BLOCKX];
int col = threadIdx.x;
int tid = threadIdx.y;
int tx = blockIdx.x * blockDim.x + col;
int tz = blockIdx.y;
int3 stack_1, stack_2;
int p, lastY;
float ii;
if (tid == blockDim.y - 1) {
lastY = size - 1;
p = tex1Dfetch(pbaTexColor, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
if (stack_2.x == INFINITY) { // Not a site
lastY = stack_2.y;
if (lastY != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, lastY, tz, size));
DECODE(p, stack_2.x, stack_2.y, stack_2.z);
}
}
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
ii = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
}
//if (tz == 250 && tx == 431)
//{
//printf("~~~~%f %d %f \n", s_ii[col], col, ii);
// printf("~~~ %d %d %d %d %d %d %d %d %d \n", blockDim.y - 1, stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
//}
s_Stack1[col] = stack_1; s_Stack2[col] = stack_2; s_lastY[col] = lastY; s_ii[col] = ii;
}
__syncthreads();
//if (tz == 311 && tx == 256)
//{
/*int nx, ny, nz;
for(int a = 0; a < 512 ; a++)
{
p = tex1Dfetch(pbaTexColor, TOID(tx, a, tz, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n",a, nx, ny,nz);
}*/
/*p = tex1Dfetch(pbaTexColor, TOID(431, 97, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 97, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 98, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 98, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 99, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n",99, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 100, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 100, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 101, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 101, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 102, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 102, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 103, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 103, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 104, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 104, nx, ny,nz);
p = tex1Dfetch(pbaTexColor, TOID(431, 105, 250, size));
DECODE(p, nx,ny, nz);
printf("%d %d %d %d \n", 105, nx, ny,nz);*/
//}
for (int ty = size - 1 - tid; ty >= 0; ty -= blockDim.y) {
stack_1 = s_Stack1[col]; stack_2 = s_Stack2[col]; lastY = s_lastY[col]; ii = s_ii[col];
/**/
//if (tz == 250 && tx == 431)
// printf("@@@ %d %d %d %d %d %d \n",tx, ty, tz, stack_2.x, lastY, stack_2.z);
while (stack_2.y != INFINITY) {
if (ty > ii)
break;
/*if (tz == 250 && tx == 431 )
{
printf("------ %d %f %d\n", ty, ii, stack_2.y);
}*/
lastY = stack_2.y; stack_2 = stack_1;
if (stack_2.y != INFINITY) {
p = tex1Dfetch(pbaTexColor, TOID(tx, stack_2.y, tz, size));
DECODE(p, stack_1.x, stack_1.y, stack_1.z);
ii = interpointY(stack_1.x, stack_2.y, stack_1.z, stack_2.x, lastY, stack_2.z, tx, tz);
}
}
__syncthreads();
/*if (tz == 250 && tx == 431 )
{
printf("Encode %d %d %d %d \n", ty, stack_2.x, lastY, stack_2.z);
}*/
output[TOID(tx, ty, tz, size)] = ENCODE(stack_2.x, lastY, stack_2.z);
if (tid == blockDim.y - 1) {
s_Stack1[col] = stack_1; s_Stack2[col] = stack_2; s_lastY[col] = lastY; s_ii[col] = ii;
}
__syncthreads();
}
//if (tz == 280 && tx == 280)
//{
// int nx, ny, nz;
// for(int a = 0; a < 512 ; a++)
// {
// p = output[TOID(tx, a, tz, size)];//tex1Dfetch(pbaTexColor, TOID(431, a, 250, size));
// DECODE(p, nx,ny, nz);
// printf("%d %d %d %d \n",a, nx,ny, nz);
// }
//}
}
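//------------------------------------------------------
// In-place XY transpose of the volume, one BLOCKXY x BLOCKXY tile pair per block:
// the two mirrored tiles are staged in shared memory, swapped, and written back
// with ROTATEXY so the coordinates packed inside each voxel are swapped as well.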
__global__ void PBADistanceField_kernelTransposeXY(int *data, int log2Width, int mask)
{
__shared__ int block1[BLOCKXY][BLOCKXY + 1];
__shared__ int block2[BLOCKXY][BLOCKXY + 1];
int blkX = blockIdx.y;
int blkY = blockIdx.x >> log2Width;
int blkZ = blockIdx.x & mask;
if (blkX > blkY)
return ;
int x, y, z, id1, id2;
int pixel;
blkX = __mul24(blkX, BLOCKXY);
blkY = __mul24(blkY, BLOCKXY);
z = blkZ << log2Width;
// read the cube into shared memory
x = blkX + threadIdx.x;
y = blkY + threadIdx.y;
id1 = ((z + y) << log2Width) + x;
block1[threadIdx.y][threadIdx.x] = data[id1];
x = blkY + threadIdx.x;
y = blkX + threadIdx.y;
id2 = ((z + y) << log2Width) + x;
block2[threadIdx.y][threadIdx.x] = data[id2];
__syncthreads();
//if (id2 == 0) printf("------------------------------------- hahahaha\n"); // leftover debug output
// write the rotated cube to global memory
pixel = block1[threadIdx.x][threadIdx.y];
data[id2] = ROTATEXY(pixel);
pixel = block2[threadIdx.x][threadIdx.y];
data[id1] = ROTATEXY(pixel);
}
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
//--------------------------------------------------------------------------------------------
__constant__ unsigned int MultiplyDeBruijnBitPosition[] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
__constant__ unsigned char BitReverseTable256[] =
{
0x00, 0x80, 0x40, 0xC0, 0x20, 0xA0, 0x60, 0xE0, 0x10, 0x90, 0x50, 0xD0, 0x30, 0xB0, 0x70, 0xF0,
0x08, 0x88, 0x48, 0xC8, 0x28, 0xA8, 0x68, 0xE8, 0x18, 0x98, 0x58, 0xD8, 0x38, 0xB8, 0x78, 0xF8,
0x04, 0x84, 0x44, 0xC4, 0x24, 0xA4, 0x64, 0xE4, 0x14, 0x94, 0x54, 0xD4, 0x34, 0xB4, 0x74, 0xF4,
0x0C, 0x8C, 0x4C, 0xCC, 0x2C, 0xAC, 0x6C, 0xEC, 0x1C, 0x9C, 0x5C, 0xDC, 0x3C, 0xBC, 0x7C, 0xFC,
0x02, 0x82, 0x42, 0xC2, 0x22, 0xA2, 0x62, 0xE2, 0x12, 0x92, 0x52, 0xD2, 0x32, 0xB2, 0x72, 0xF2,
0x0A, 0x8A, 0x4A, 0xCA, 0x2A, 0xAA, 0x6A, 0xEA, 0x1A, 0x9A, 0x5A, 0xDA, 0x3A, 0xBA, 0x7A, 0xFA,
0x06, 0x86, 0x46, 0xC6, 0x26, 0xA6, 0x66, 0xE6, 0x16, 0x96, 0x56, 0xD6, 0x36, 0xB6, 0x76, 0xF6,
0x0E, 0x8E, 0x4E, 0xCE, 0x2E, 0xAE, 0x6E, 0xEE, 0x1E, 0x9E, 0x5E, 0xDE, 0x3E, 0xBE, 0x7E, 0xFE,
0x01, 0x81, 0x41, 0xC1, 0x21, 0xA1, 0x61, 0xE1, 0x11, 0x91, 0x51, 0xD1, 0x31, 0xB1, 0x71, 0xF1,
0x09, 0x89, 0x49, 0xC9, 0x29, 0xA9, 0x69, 0xE9, 0x19, 0x99, 0x59, 0xD9, 0x39, 0xB9, 0x79, 0xF9,
0x05, 0x85, 0x45, 0xC5, 0x25, 0xA5, 0x65, 0xE5, 0x15, 0x95, 0x55, 0xD5, 0x35, 0xB5, 0x75, 0xF5,
0x0D, 0x8D, 0x4D, 0xCD, 0x2D, 0xAD, 0x6D, 0xED, 0x1D, 0x9D, 0x5D, 0xDD, 0x3D, 0xBD, 0x7D, 0xFD,
0x03, 0x83, 0x43, 0xC3, 0x23, 0xA3, 0x63, 0xE3, 0x13, 0x93, 0x53, 0xD3, 0x33, 0xB3, 0x73, 0xF3,
0x0B, 0x8B, 0x4B, 0xCB, 0x2B, 0xAB, 0x6B, 0xEB, 0x1B, 0x9B, 0x5B, 0xDB, 0x3B, 0xBB, 0x7B, 0xFB,
0x07, 0x87, 0x47, 0xC7, 0x27, 0xA7, 0x67, 0xE7, 0x17, 0x97, 0x57, 0xD7, 0x37, 0xB7, 0x77, 0xF7,
0x0F, 0x8F, 0x4F, 0xCF, 0x2F, 0xAF, 0x6F, 0xEF, 0x1F, 0x9F, 0x5F, 0xDF, 0x3F, 0xBF, 0x7F, 0xFF
};
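//------------------------------------------------------
// Bit-manipulation helpers: SWAR popcount, byte-table bit reversal, and De Bruijn
// based first/last set-bit queries used by the unpacking kernels above.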
__device__ inline unsigned int bitCount(unsigned int i)
{
i = i - ((i >> 1) & 0x55555555);
i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;
}
__device__ inline unsigned int Reversebit(unsigned int v)
{
unsigned int r;
r = (BitReverseTable256[ v & 0xff] << 24) |
(BitReverseTable256[(v >> 8) & 0xff] << 16) |
(BitReverseTable256[(v >> 16) & 0xff] << 8) |
(BitReverseTable256[(v >> 24) & 0xff]);
return r;
}
__device__ inline unsigned int GetFirstBitPos(unsigned int source)
{
return (MultiplyDeBruijnBitPosition[((unsigned int)((source & -source) * 0x077CB531U)) >> 27]);
}
__device__ inline unsigned int GetLastBitPos(unsigned int source)
{
unsigned int r = Reversebit(source);
return (31-(MultiplyDeBruijnBitPosition[((unsigned int)((r & -r) * 0x077CB531U)) >> 27]));
}
__device__ inline unsigned int SetBitPos(unsigned int pos)
{
return (1 << pos);
}
__device__ inline bool GetBitPos(unsigned int pos, unsigned int source)
{
return (source & (1 << pos));
}
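// Minimal host-side sketch (added for illustration, not part of the original source): the
// De Bruijn trick used by GetFirstBitPos/GetLastBitPos. "ctz_debruijn_host" is a hypothetical
// helper; the table duplicates the __constant__ MultiplyDeBruijnBitPosition array for host use.
static inline unsigned int ctz_debruijn_host(unsigned int source)
{
    static const unsigned int tbl[32] = { 0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
                                          31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9 };
    // (source & -source) isolates the lowest set bit; multiplying by the De Bruijn constant
    // 0x077CB531 puts a unique 5-bit pattern into the top bits, and >> 27 turns it into an index.
    return tbl[((source & (0u - source)) * 0x077CB531U) >> 27];
}
// e.g. ctz_debruijn_host(0xB0u) == 4, matching GetFirstBitPos(0xB0u) on the device.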
__device__ inline float interpointY(int x1, int y1, int z1, int x2, int y2, int z2, int x0, int z0)
{
float xM = (x1 + x2) / 2.0f;
float yM = (y1 + y2) / 2.0f;
float zM = (z1 + z2) / 2.0f;
float nx = x2 - x1;
float ny = y2 - y1;
float nz = z2 - z1;
return yM + (nx * (xM - x0) + nz * (zM - z0)) / ny;
}
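// interpointY evaluates the perpendicular-bisector plane of the segment (x1,y1,z1)-(x2,y2,z2)
// at a given (x0, z0): with midpoint M and normal n = p2 - p1, solving n.(P - M) = 0 for the
// y-component gives y = yM + (nx*(xM - x0) + nz*(zM - z0)) / ny, which is the value returned.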
__device__ inline int middlepointY(unsigned int site1, unsigned int site2, int z0)
{
int dy22 = (GET_PTR(site2)-z0)*(GET_PTR(site2)-z0);
int dy12 = (GET_PTR(site1)-z0)*(GET_PTR(site1)-z0);
int d1 = GET_STACK(site1);
int d2 = GET_STACK(site2);
return int(0.5 * ((dy22-dy12)/(float)(d2-d1) + d1+d2))+1;
}
__device__ inline int middlepointX(unsigned int site1, unsigned int site2, int y0, int z0)
{
int xPlusx = GET_X(site1) + GET_X(site2);
int xMinusx = GET_X(site1) - GET_X(site2);
int yPlusy = GET_Y(site1) + GET_Y(site2);
int yMinusy = GET_Y(site1) - GET_Y(site2);
int zPlusz = GET_Z(site1) + GET_Z(site2);
int zMinusz = GET_Z(site1) - GET_Z(site2);
return int(0.5 * ((zMinusz*(zPlusz-2.0*z0)+yMinusy*(yPlusy-2.0*y0))/(float)xMinusx + xPlusx))+1;
}
|
ee3ae6ba4a87b49ac5f5b6ba77ad9a3125db4c3f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel1_zdir;
int xdim0_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim0_advec_cell_kernel1_zdir;
int ydim0_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim1_advec_cell_kernel1_zdir;
int xdim1_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim1_advec_cell_kernel1_zdir;
int ydim1_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim2_advec_cell_kernel1_zdir;
int xdim2_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim2_advec_cell_kernel1_zdir;
int ydim2_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim3_advec_cell_kernel1_zdir;
int xdim3_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim3_advec_cell_kernel1_zdir;
int ydim3_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim4_advec_cell_kernel1_zdir;
int xdim4_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim4_advec_cell_kernel1_zdir;
int ydim4_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim5_advec_cell_kernel1_zdir;
int xdim5_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim5_advec_cell_kernel1_zdir;
int ydim5_advec_cell_kernel1_zdir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_cell_kernel1_zdir * (y) + \
xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_cell_kernel1_zdir * (y) + \
xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_cell_kernel1_zdir * (y) + \
xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_cell_kernel1_zdir * (y) + \
xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_cell_kernel1_zdir * (y) + \
xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_advec_cell_kernel1_zdir * (y) + \
xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir * (z))
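// The OPS_ACCn macros linearise a relative (x, y, z) stencil offset into the flat array of
// argument n: index = x + xdim_n*y + xdim_n*ydim_n*z, with the padded extents xdim_n/ydim_n
// held in __constant__ memory and refreshed by the host stub below whenever the dat sizes
// change. For example, OPS_ACC3(1, 0, 0) reads the x-flux of the cell one step to the right.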
// user function
__device__
inline void
advec_cell_kernel1_zdir_gpu(double *pre_vol, double *post_vol,
const double *volume, const double *vol_flux_x,
const double *vol_flux_y,
const double *vol_flux_z) {
pre_vol[OPS_ACC0(0, 0, 0)] =
volume[OPS_ACC2(0, 0, 0)] +
(vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)] +
vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] +
vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]);
post_vol[OPS_ACC1(0, 0, 0)] =
pre_vol[OPS_ACC0(0, 0, 0)] -
(vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]);
}
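// As written, pre_vol is the cell volume plus the net volume flux through all six faces, and
// post_vol subtracts the z-direction contribution again, leaving the volume advanced by the
// x- and y-fluxes only, which is what the z-direction advection sweep operates on.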
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_advec_cell_kernel1_zdir(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
const double *__restrict arg4,
const double *__restrict arg5,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim0_advec_cell_kernel1_zdir *
ydim0_advec_cell_kernel1_zdir;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim1_advec_cell_kernel1_zdir *
ydim1_advec_cell_kernel1_zdir;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim2_advec_cell_kernel1_zdir *
ydim2_advec_cell_kernel1_zdir;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim3_advec_cell_kernel1_zdir *
ydim3_advec_cell_kernel1_zdir;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim4_advec_cell_kernel1_zdir *
ydim4_advec_cell_kernel1_zdir;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim5_advec_cell_kernel1_zdir *
ydim5_advec_cell_kernel1_zdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel1_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
#else
void ops_par_loop_advec_cell_kernel1_zdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 6, range, 117))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(117, "advec_cell_kernel1_zdir");
OPS_kernels[117].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel1_zdir_h ||
ydim0 != ydim0_advec_cell_kernel1_zdir_h ||
xdim1 != xdim1_advec_cell_kernel1_zdir_h ||
ydim1 != ydim1_advec_cell_kernel1_zdir_h ||
xdim2 != xdim2_advec_cell_kernel1_zdir_h ||
ydim2 != ydim2_advec_cell_kernel1_zdir_h ||
xdim3 != xdim3_advec_cell_kernel1_zdir_h ||
ydim3 != ydim3_advec_cell_kernel1_zdir_h ||
xdim4 != xdim4_advec_cell_kernel1_zdir_h ||
ydim4 != ydim4_advec_cell_kernel1_zdir_h ||
xdim5 != xdim5_advec_cell_kernel1_zdir_h ||
ydim5 != ydim5_advec_cell_kernel1_zdir_h) {
hipMemcpyToSymbol(xdim0_advec_cell_kernel1_zdir, &xdim0, sizeof(int));
xdim0_advec_cell_kernel1_zdir_h = xdim0;
hipMemcpyToSymbol(ydim0_advec_cell_kernel1_zdir, &ydim0, sizeof(int));
ydim0_advec_cell_kernel1_zdir_h = ydim0;
hipMemcpyToSymbol(xdim1_advec_cell_kernel1_zdir, &xdim1, sizeof(int));
xdim1_advec_cell_kernel1_zdir_h = xdim1;
hipMemcpyToSymbol(ydim1_advec_cell_kernel1_zdir, &ydim1, sizeof(int));
ydim1_advec_cell_kernel1_zdir_h = ydim1;
hipMemcpyToSymbol(xdim2_advec_cell_kernel1_zdir, &xdim2, sizeof(int));
xdim2_advec_cell_kernel1_zdir_h = xdim2;
hipMemcpyToSymbol(ydim2_advec_cell_kernel1_zdir, &ydim2, sizeof(int));
ydim2_advec_cell_kernel1_zdir_h = ydim2;
hipMemcpyToSymbol(xdim3_advec_cell_kernel1_zdir, &xdim3, sizeof(int));
xdim3_advec_cell_kernel1_zdir_h = xdim3;
hipMemcpyToSymbol(ydim3_advec_cell_kernel1_zdir, &ydim3, sizeof(int));
ydim3_advec_cell_kernel1_zdir_h = ydim3;
hipMemcpyToSymbol(xdim4_advec_cell_kernel1_zdir, &xdim4, sizeof(int));
xdim4_advec_cell_kernel1_zdir_h = xdim4;
hipMemcpyToSymbol(ydim4_advec_cell_kernel1_zdir, &ydim4, sizeof(int));
ydim4_advec_cell_kernel1_zdir_h = ydim4;
hipMemcpyToSymbol(xdim5_advec_cell_kernel1_zdir, &xdim5, sizeof(int));
xdim5_advec_cell_kernel1_zdir_h = xdim5;
hipMemcpyToSymbol(ydim5_advec_cell_kernel1_zdir, &ydim5, sizeof(int));
ydim5_advec_cell_kernel1_zdir_h = ydim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[117].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_advec_cell_kernel1_zdir), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[117].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[117].mpi_time += t2 - t1;
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 117;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 117;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_cell_kernel1_zdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(117, "advec_cell_kernel1_zdir");
}
ops_enqueue_kernel(desc);
}
#endif
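// Illustrative call-site sketch (not part of the generated file; "block", "range" and the
// ops_arg_dat arguments below are placeholders for the application's own dats and stencils):
//   ops_par_loop_advec_cell_kernel1_zdir("advec_cell_kernel1_zdir", block, 3, range,
//       ops_arg_dat(pre_vol, 1, stencil_000, "double", OPS_WRITE),
//       ... /* one ops_arg per remaining dat, OPS_READ for the volume and flux fields */ );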
|
ee3ae6ba4a87b49ac5f5b6ba77ad9a3125db4c3f.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_advec_cell_kernel1_zdir;
int xdim0_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim0_advec_cell_kernel1_zdir;
int ydim0_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim1_advec_cell_kernel1_zdir;
int xdim1_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim1_advec_cell_kernel1_zdir;
int ydim1_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim2_advec_cell_kernel1_zdir;
int xdim2_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim2_advec_cell_kernel1_zdir;
int ydim2_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim3_advec_cell_kernel1_zdir;
int xdim3_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim3_advec_cell_kernel1_zdir;
int ydim3_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim4_advec_cell_kernel1_zdir;
int xdim4_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim4_advec_cell_kernel1_zdir;
int ydim4_advec_cell_kernel1_zdir_h = -1;
__constant__ int xdim5_advec_cell_kernel1_zdir;
int xdim5_advec_cell_kernel1_zdir_h = -1;
__constant__ int ydim5_advec_cell_kernel1_zdir;
int ydim5_advec_cell_kernel1_zdir_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
#define OPS_ACC0(x, y, z) \
(x + xdim0_advec_cell_kernel1_zdir * (y) + \
xdim0_advec_cell_kernel1_zdir * ydim0_advec_cell_kernel1_zdir * (z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_advec_cell_kernel1_zdir * (y) + \
xdim1_advec_cell_kernel1_zdir * ydim1_advec_cell_kernel1_zdir * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_advec_cell_kernel1_zdir * (y) + \
xdim2_advec_cell_kernel1_zdir * ydim2_advec_cell_kernel1_zdir * (z))
#define OPS_ACC3(x, y, z) \
(x + xdim3_advec_cell_kernel1_zdir * (y) + \
xdim3_advec_cell_kernel1_zdir * ydim3_advec_cell_kernel1_zdir * (z))
#define OPS_ACC4(x, y, z) \
(x + xdim4_advec_cell_kernel1_zdir * (y) + \
xdim4_advec_cell_kernel1_zdir * ydim4_advec_cell_kernel1_zdir * (z))
#define OPS_ACC5(x, y, z) \
(x + xdim5_advec_cell_kernel1_zdir * (y) + \
xdim5_advec_cell_kernel1_zdir * ydim5_advec_cell_kernel1_zdir * (z))
// user function
__device__
inline void
advec_cell_kernel1_zdir_gpu(double *pre_vol, double *post_vol,
const double *volume, const double *vol_flux_x,
const double *vol_flux_y,
const double *vol_flux_z) {
pre_vol[OPS_ACC0(0, 0, 0)] =
volume[OPS_ACC2(0, 0, 0)] +
(vol_flux_x[OPS_ACC3(1, 0, 0)] - vol_flux_x[OPS_ACC3(0, 0, 0)] +
vol_flux_y[OPS_ACC4(0, 1, 0)] - vol_flux_y[OPS_ACC4(0, 0, 0)] +
vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]);
post_vol[OPS_ACC1(0, 0, 0)] =
pre_vol[OPS_ACC0(0, 0, 0)] -
(vol_flux_z[OPS_ACC5(0, 0, 1)] - vol_flux_z[OPS_ACC5(0, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC3
#undef OPS_ACC4
#undef OPS_ACC5
__global__ void ops_advec_cell_kernel1_zdir(double *__restrict arg0,
double *__restrict arg1,
const double *__restrict arg2,
const double *__restrict arg3,
const double *__restrict arg4,
const double *__restrict arg5,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim0_advec_cell_kernel1_zdir *
ydim0_advec_cell_kernel1_zdir;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim1_advec_cell_kernel1_zdir *
ydim1_advec_cell_kernel1_zdir;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim2_advec_cell_kernel1_zdir *
ydim2_advec_cell_kernel1_zdir;
arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim3_advec_cell_kernel1_zdir *
ydim3_advec_cell_kernel1_zdir;
arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim4_advec_cell_kernel1_zdir *
ydim4_advec_cell_kernel1_zdir;
arg5 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim5_advec_cell_kernel1_zdir +
idx_z * 1 * 1 * xdim5_advec_cell_kernel1_zdir *
ydim5_advec_cell_kernel1_zdir;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
advec_cell_kernel1_zdir_gpu(arg0, arg1, arg2, arg3, arg4, arg5);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
#else
void ops_par_loop_advec_cell_kernel1_zdir_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
#endif
// Timing
double t1, t2, c1, c2;
ops_arg args[6] = {arg0, arg1, arg2, arg3, arg4, arg5};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args, 6, range, 117))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(117, "advec_cell_kernel1_zdir");
OPS_kernels[117].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
if (xdim0 != xdim0_advec_cell_kernel1_zdir_h ||
ydim0 != ydim0_advec_cell_kernel1_zdir_h ||
xdim1 != xdim1_advec_cell_kernel1_zdir_h ||
ydim1 != ydim1_advec_cell_kernel1_zdir_h ||
xdim2 != xdim2_advec_cell_kernel1_zdir_h ||
ydim2 != ydim2_advec_cell_kernel1_zdir_h ||
xdim3 != xdim3_advec_cell_kernel1_zdir_h ||
ydim3 != ydim3_advec_cell_kernel1_zdir_h ||
xdim4 != xdim4_advec_cell_kernel1_zdir_h ||
ydim4 != ydim4_advec_cell_kernel1_zdir_h ||
xdim5 != xdim5_advec_cell_kernel1_zdir_h ||
ydim5 != ydim5_advec_cell_kernel1_zdir_h) {
cudaMemcpyToSymbol(xdim0_advec_cell_kernel1_zdir, &xdim0, sizeof(int));
xdim0_advec_cell_kernel1_zdir_h = xdim0;
cudaMemcpyToSymbol(ydim0_advec_cell_kernel1_zdir, &ydim0, sizeof(int));
ydim0_advec_cell_kernel1_zdir_h = ydim0;
cudaMemcpyToSymbol(xdim1_advec_cell_kernel1_zdir, &xdim1, sizeof(int));
xdim1_advec_cell_kernel1_zdir_h = xdim1;
cudaMemcpyToSymbol(ydim1_advec_cell_kernel1_zdir, &ydim1, sizeof(int));
ydim1_advec_cell_kernel1_zdir_h = ydim1;
cudaMemcpyToSymbol(xdim2_advec_cell_kernel1_zdir, &xdim2, sizeof(int));
xdim2_advec_cell_kernel1_zdir_h = xdim2;
cudaMemcpyToSymbol(ydim2_advec_cell_kernel1_zdir, &ydim2, sizeof(int));
ydim2_advec_cell_kernel1_zdir_h = ydim2;
cudaMemcpyToSymbol(xdim3_advec_cell_kernel1_zdir, &xdim3, sizeof(int));
xdim3_advec_cell_kernel1_zdir_h = xdim3;
cudaMemcpyToSymbol(ydim3_advec_cell_kernel1_zdir, &ydim3, sizeof(int));
ydim3_advec_cell_kernel1_zdir_h = ydim3;
cudaMemcpyToSymbol(xdim4_advec_cell_kernel1_zdir, &xdim4, sizeof(int));
xdim4_advec_cell_kernel1_zdir_h = xdim4;
cudaMemcpyToSymbol(ydim4_advec_cell_kernel1_zdir, &ydim4, sizeof(int));
ydim4_advec_cell_kernel1_zdir_h = ydim4;
cudaMemcpyToSymbol(xdim5_advec_cell_kernel1_zdir, &xdim5, sizeof(int));
xdim5_advec_cell_kernel1_zdir_h = xdim5;
cudaMemcpyToSymbol(ydim5_advec_cell_kernel1_zdir, &ydim5, sizeof(int));
ydim5_advec_cell_kernel1_zdir_h = ydim5;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
char *p_a[6];
// set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3 +
dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]);
base3 = base3 +
dat3 * args[3].dat->size[0] * args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4 +
dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]);
base4 = base4 +
dat4 * args[4].dat->size[0] * args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5 +
dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1]);
base5 = base5 +
dat5 * args[5].dat->size[0] * args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 6);
ops_halo_exchanges(args, 6, range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[117].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_advec_cell_kernel1_zdir<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5], x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[117].time += t1 - t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 6);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
#endif
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[117].mpi_time += t2 - t1;
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[117].transfer += ops_compute_transfer(dim, start, end, &arg5);
}
}
#ifdef OPS_LAZY
void ops_par_loop_advec_cell_kernel1_zdir(char const *name, ops_block block,
int dim, int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2,
ops_arg arg3, ops_arg arg4,
ops_arg arg5) {
ops_kernel_descriptor *desc =
(ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 117;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 117;
for (int i = 0; i < 6; i++) {
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 6;
desc->args = (ops_arg *)malloc(6 * sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->function = ops_par_loop_advec_cell_kernel1_zdir_execute;
if (OPS_diags > 1) {
ops_timing_realloc(117, "advec_cell_kernel1_zdir");
}
ops_enqueue_kernel(desc);
}
#endif
|
24729b2935231b9f08c2520bfa48739fe77d23e7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmergecg.cu normal z -> c, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from cmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs" (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_ccgreduce_kernel_spmv1( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
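// In the reduction above each of the 128 threads per block accumulates a grid-strided slice of
// vtmp into shared memory, and the block then tree-reduces those 128 partials (64, 32, ... 1).
// For complex precisions the last warp keeps explicit __syncthreads() between steps, while the
// real-precision branches use a volatile pointer and rely on warp-synchronous execution; every
// block writes one value to vtmp2, so the host re-launches the kernel until a single sum is left.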
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_ccgmerge_spmvcsr_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d[ d_colind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpackt_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = d_colind [ n * k + i ];
magmaFloatComplex val = d_val [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = d_colind [ num_cols_per_row * i + k ];
magmaFloatComplex val = d_val [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_8(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within the row (T threads are assigned to one row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
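// In the ELLRT kernels here, T threads cooperate on one row: each thread accumulates every T-th
// nonzero of its row, stores the partial dot product in shared memory, and the partials are folded
// by halving (starting at +4 for T=8, +8 for T=16, +16 for T=32) before lane 0 writes z[i]. The
// vtmp argument is unused; the separate _kernel2 below performs the d.z reduction afterwards.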
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_16(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within the row (T threads are assigned to one row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_32(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within the row (T threads are assigned to one row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_ccgmerge_spmvellpackrt_kernel2(
int n,
magmaFloatComplex *z,
magmaFloatComplex *d,
magmaFloatComplex *vtmp2
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_ccgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++){
int col = d_colind [offset+ blocksize * n + Idx ];
magmaFloatComplex val = d_val[offset+ blocksize * n + Idx];
if( val != 0){
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_8( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_16( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_32( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_ccg_rhokernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
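// Scalar layout used throughout this file, as inferred from the kernels: skp[0] = alpha,
// skp[1] = beta = (r,r), skp[2] = gamma, skp[3] = rho, and skp[4] holds the dot product (d,z)
// produced by the merged SpMV above.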
/**
Purpose
-------
    Merges the first SpMV (for the different storage formats) with the dot product
    and the computation of rho
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_d magmaFloatComplex*
input vector d
@param
d_z magmaFloatComplex*
input vector z
@param
skp magmaFloatComplex*
array for parameters ( skp[3]=rho )
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_spmv1(
magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_d,
magmaFloatComplex *d_z,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR )
hipLaunchKernelGGL(( magma_ccgmerge_spmvcsr_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_ELLPACK )
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpack_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_ELL )
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackt_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_SELLC || A.storage_type == Magma_SELLP ){
if( A.blocksize==256){
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellc_kernel), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, A.blocksize, A. val, A.col, A.row,
d_d, d_z, d1 );
}
else
printf("error: SELLC only for blocksize 256.\n");
}
else if( A.storage_type == Magma_SELLP ){
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = sqrt(A.numblocks);
int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( magmaFloatComplex );
if( A.alignment == 8)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_8)
, dim3(gridsellp), dim3(block), Mssellp, magma_stream ,
A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else if( A.alignment == 16)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_16)
, dim3(gridsellp), dim3(block), Mssellp, magma_stream ,
A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else if( A.alignment == 32)
hipLaunchKernelGGL(( magma_ccgmerge_spmvsellpt_kernel_32)
, dim3(gridsellp), dim3(block), Mssellp, magma_stream ,
A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, d_z, d_d, d1 );
}
else if( A.storage_type == Magma_ELLRT ){
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
int num_threads = A.alignment*A.blocksize;
int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = sqrt(num_blocks);
int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if( A.alignment == 32 ){
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_32)
, dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream ,
A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else if( A.alignment == 16 ){
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_16)
, dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream ,
A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else if( A.alignment == 8 ){
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel_8)
, dim3(gridellrt), dim3(num_threads) , Mellrt, magma_stream ,
A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else{
printf("error: alignment %d not supported.\n", A.alignment);
exit(-1);
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
hipLaunchKernelGGL(( magma_ccgmerge_spmvellpackrt_kernel2), dim3(Gs), dim3(Bs), Ms, magma_stream ,
A.num_rows, d_z, d_d, d1 );
}
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_ccg_rhokernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
return MAGMA_SUCCESS;
}
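// The while( Gs.x > 1 ) loop above is a cascaded reduction: each pass launches half as many
// blocks on the previous pass's partial sums, ping-ponging between the work buffers d1 and d2
// via aux1/aux2 until a single value remains; that value is copied into skp[4] and the tiny
// rho kernel finishes the scalar update without leaving the device.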
/* -------------------------------------------------------------------------- */
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_ccgmerge_xrbeta_kernel(
int n,
magmaFloatComplex *x,
magmaFloatComplex *r,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *skp,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
magmaFloatComplex rho = skp[3];
magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ){
x[i] += rho * d[i] ;
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_ccg_alphabetakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_C_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_ccg_d_kernel(
int n,
magmaFloatComplex *skp,
magmaFloatComplex *r,
magmaFloatComplex *d
){
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaFloatComplex alpha = skp[0];
if( i<n ){
d[i] = r[i] + alpha * d[i];
}
}
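// Host-side reference of the updates fused by the kernels above (an illustrative sketch only,
// written in the scalar naming of this file: rho = skp[3], alpha = skp[0] = beta/gamma):
//   for (int i = 0; i < n; ++i) { x[i] += rho * d[i]; r[i] -= rho * z[i]; }
//   beta  = dot(r, r);            // first reduction step in magma_ccgmerge_xrbeta_kernel
//   alpha = beta / gamma;         // magma_ccg_alphabetakernel
//   for (int i = 0; i < n; ++i) d[i] = r[i] + alpha * d[i];   // magma_ccg_d_kernel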
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update of the Krylov vector d
Arguments
---------
@param
n int
dimension n
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_x magmaFloatComplex*
input vector x
@param
d_r magmaFloatComplex*
input/output vector r
@param
d_d magmaFloatComplex*
input vector d
@param
d_z magmaFloatComplex*
input vector z
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_xrbeta(
int n,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_x,
magmaFloatComplex *d_r,
magmaFloatComplex *d_d,
magmaFloatComplex *d_z,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
hipLaunchKernelGGL(( magma_ccgmerge_xrbeta_kernel), dim3(Gs), dim3(Bs), Ms, 0,
n, d_x, d_r, d_d, d_z, skp, d1);
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
hipLaunchKernelGGL(( magma_ccgreduce_kernel_spmv1), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2 , 0,
Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+1, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
hipLaunchKernelGGL(( magma_ccg_alphabetakernel), dim3(Gs2), dim3(Bs2), 0, 0, skp );
dim3 Bs3( local_block_size );
dim3 Gs3( (n+local_block_size-1)/local_block_size );
hipLaunchKernelGGL(( magma_ccg_d_kernel), dim3(Gs3), dim3(Bs3), 0, 0, n, skp, d_r, d_d );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
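// Hedged summary (an assumption drawn from the kernels in this file, not an
// official MAGMA definition): the scalar workspace skp appears to hold five
// device-side values that the merged kernels read and update in place. The
// symbolic names below are illustrative only; the code above indexes skp
// numerically.
enum magma_ccgmerge_skp_slot_sketch {
    SKP_BETA   = 0,  // beta = r'r_new / r'r_old        (magma_ccg_alphabetakernel)
    SKP_RR     = 1,  // current r'r                     (written by magma_ccgmerge_xrbeta)
    SKP_RR_OLD = 2,  // previous r'r                    (saved by magma_ccg_rhokernel)
    SKP_RHO    = 3,  // rho = r'r / d'z, the step size  (magma_ccg_rhokernel)
    SKP_DZ     = 4   // d'z                             (written by magma_ccgmerge_spmv1)
};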
|
24729b2935231b9f08c2520bfa48739fe77d23e7.cu
|
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@generated from zmergecg.cu normal z -> c, Wed Sep 17 15:08:43 2014
@author Hartwig Anzt
*/
#include "common_magma.h"
#include "magmasparse.h"
#define BLOCK_SIZE 512
#define PRECISION_c
// These routines merge multiple kernels from cmergecg into one
// for a description see
// "Reformulated Conjugate Gradient for the Energy-Aware
// Solution of Linear Systems on GPUs (ICPP '13)
// accelerated reduction for one vector
__global__ void
magma_ccgreduce_kernel_spmv1( int Gs,
int n,
magmaFloatComplex *vtmp,
magmaFloatComplex *vtmp2 ){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int blockSize = 128;
int gridSize = blockSize * 2 * gridDim.x;
temp[Idx] = MAGMA_C_MAKE( 0.0, 0.0);
int i = blockIdx.x * ( blockSize * 2 ) + Idx;
while (i < Gs ) {
temp[ Idx ] += vtmp[ i ];
temp[ Idx ] += ( i + blockSize < Gs ) ? vtmp[ i + blockSize ]
: MAGMA_C_MAKE( 0.0, 0.0);
i += gridSize;
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using CSR and the first step of the reduction
__global__ void
magma_ccgmerge_spmvcsr_kernel(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_rowptr,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int j;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ){
magmaFloatComplex dot = MAGMA_C_ZERO;
int start = d_rowptr[ i ];
int end = d_rowptr[ i+1 ];
for( j=start; j<end; j++)
dot += d_val[ j ] * d[ d_colind[j] ];
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELL and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpackt_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = d_colind [ n * k + i ];
magmaFloatComplex val = d_val [ n * k + i ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLPACK and the first step of the reduction
__global__ void
magma_ccgmerge_spmvellpack_kernel(
int n,
int num_cols_per_row,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int k = 0; k < num_cols_per_row ; k ++){
int col = d_colind [ num_cols_per_row * i + k ];
magmaFloatComplex val = d_val [ num_cols_per_row * i + k ];
if( val != 0)
dot += val * d[ col ];
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using ELLRT 8 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_8(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 4 ){
shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 16 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_16(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 8 ){
shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// computes the SpMV using ELLRT, 32 threads per row
__global__ void
magma_ccgmerge_spmvellpackrt_kernel_32(
int n,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowlength,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp,
magma_int_t T,
magma_int_t alignment ){
int idx = blockIdx.y * gridDim.x * blockDim.x +
blockDim.x * blockIdx.x + threadIdx.x ; // global thread index
int idb = threadIdx.x ; // local thread index
    int idp = idb%T;  // thread index within its row (T threads per row)
int i = idx/T; // row index
extern __shared__ magmaFloatComplex shared[];
if(i < n ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int max_ = (d_rowlength[i]+T-1)/T;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
// original code in paper (not working for me)
//magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ];
//int col = d_colind [ k*(T*alignment)+(i*T)+idp ];
// new code (working for me)
magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ];
int col = d_colind [ k*(T)+(i*alignment)+idp ];
dot += val * d[ col ];
}
shared[idb] = dot;
if( idp < 16 ){
shared[idb]+=shared[idb+16];
if( idp < 8 ) shared[idb]+=shared[idb+8];
if( idp < 4 ) shared[idb]+=shared[idb+4];
if( idp < 2 ) shared[idb]+=shared[idb+2];
if( idp == 0 ) {
z[i] = (shared[idb]+shared[idb+1]);
}
}
}
}
// additional kernel necessary to compute first reduction step
__global__ void
magma_ccgmerge_spmvellpackrt_kernel2(
int n,
magmaFloatComplex *z,
magmaFloatComplex *d,
magmaFloatComplex *vtmp2
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
temp[ Idx ] = ( i < n ) ? z[i]*d[i] : MAGMA_C_MAKE(0.0, 0.0);
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp2[ blockIdx.x ] = temp[ 0 ];
}
}
// computes the SpMV using SELLC
__global__ void
magma_ccgmerge_spmvsellc_kernel(
int num_rows,
int blocksize,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *vtmp){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
int offset = d_rowptr[ blockIdx.x ];
int border = (d_rowptr[ blockIdx.x+1 ]-offset)/blocksize;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if(i < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
for ( int n = 0; n < border; n ++){
int col = d_colind [offset+ blocksize * n + Idx ];
magmaFloatComplex val = d_val[offset+ blocksize * n + Idx];
if( val != 0){
dot=dot+val*d[col];
}
}
z[ i ] = dot;
temp[ Idx ] = d[ i ] * dot;
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_8( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 4 ){
shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_16( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 8 ){
shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// SELLP SpMV kernel
// see paper by M. KREUTZER, G. HAGER, G. WELLEIN, H. FEHSKE, A. BISHOP
// A UNIFIED SPARSE MATRIX DATA FORMAT
// FOR MODERN PROCESSORS WITH WIDE SIMD UNITS
// SELLC SpMV kernel modified assigning multiple threads to each row - 2D kernel
__global__ void
magma_ccgmerge_spmvsellpt_kernel_32( int num_rows,
int blocksize,
int T,
magmaFloatComplex *d_val,
magma_index_t *d_colind,
magma_index_t *d_rowptr,
magmaFloatComplex *d,
magmaFloatComplex *z)
{
// T threads assigned to each row
int idx = threadIdx.y ; // thread in row
int idy = threadIdx.x; // local row
int ldx = idx * blocksize + idy;
int bdx = blockIdx.y * gridDim.x + blockIdx.x; // global block index
int row = bdx * blocksize + idy; // global row index
extern __shared__ magmaFloatComplex shared[];
if(row < num_rows ){
magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0);
int offset = d_rowptr[ bdx ];
int block = blocksize * T; // total number of threads
int max_ = (d_rowptr[ bdx+1 ]-offset)/block;
// number of elements each thread handles
for ( int k = 0; k < max_ ; k++ ){
magmaFloatComplex val =
d_val[ offset + ldx + block*k ];
int col =
d_colind[ offset + ldx + block*k ];
dot += val * d[ col ];
}
shared[ldx] = dot;
__syncthreads();
if( idx < 16 ){
shared[ldx]+=shared[ldx+blocksize*16];
__syncthreads();
if( idx < 8 ) shared[ldx]+=shared[ldx+blocksize*8];
__syncthreads();
if( idx < 4 ) shared[ldx]+=shared[ldx+blocksize*4];
__syncthreads();
if( idx < 2 ) shared[ldx]+=shared[ldx+blocksize*2];
__syncthreads();
if( idx == 0 ) {
z[row] =
(shared[ldx]+shared[ldx+blocksize*1]);
}
}
}
}
// kernel to handle scalars
__global__ void // rho = beta/tmp; gamma = beta;
magma_ccg_rhokernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp = skp[1];
skp[3] = tmp/skp[4];
skp[2] = tmp;
}
}
/**
Purpose
-------
    Merges the first SpMV, using different storage formats, with the dot product
    and the computation of rho
Arguments
---------
@param
A magma_c_sparse_matrix
input matrix
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_d magmaFloatComplex*
input vector d
@param
d_z magmaFloatComplex*
input vector z
@param
skp magmaFloatComplex*
array for parameters ( skp[3]=rho )
@ingroup magmasparse_cgegpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_spmv1(
magma_c_sparse_matrix A,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_d,
magmaFloatComplex *d_z,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (A.num_rows+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
if( A.storage_type == Magma_CSR )
magma_ccgmerge_spmvcsr_kernel<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, A.val, A.row, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_ELLPACK )
magma_ccgmerge_spmvellpack_kernel<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_ELL )
magma_ccgmerge_spmvellpackt_kernel<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, A.max_nnz_row, A.val, A.col, d_d, d_z, d1 );
else if( A.storage_type == Magma_SELLC ){
if( A.blocksize==256){
magma_ccgmerge_spmvsellc_kernel<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, A.blocksize, A. val, A.col, A.row,
d_d, d_z, d1 );
}
else
printf("error: SELLC only for blocksize 256.\n");
}
else if( A.storage_type == Magma_SELLP ){
int num_threadssellp = A.blocksize*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threadssellp > 256 )
printf("error: too much shared memory requested.\n");
dim3 block( A.blocksize, A.alignment, 1);
int dimgrid1 = sqrt(A.numblocks);
int dimgrid2 = (A.numblocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridsellp( dimgrid1, dimgrid2, 1);
int Mssellp = num_threadssellp * sizeof( magmaFloatComplex );
if( A.alignment == 8)
magma_ccgmerge_spmvsellpt_kernel_8
<<< gridsellp, block, Mssellp, magma_stream >>>
( A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else if( A.alignment == 16)
magma_ccgmerge_spmvsellpt_kernel_16
<<< gridsellp, block, Mssellp, magma_stream >>>
( A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else if( A.alignment == 32)
magma_ccgmerge_spmvsellpt_kernel_32
<<< gridsellp, block, Mssellp, magma_stream >>>
( A.num_rows, A.blocksize, A.alignment,
A.val, A.col, A.row, d_d, d_z);
else
printf("error: alignment not supported.\n");
// in case of using SELLP, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, d_z, d_d, d1 );
}
else if( A.storage_type == Magma_ELLRT ){
// in case of using ELLRT, we need a different grid, assigning
// threads_per_row processors to each row
// the block size is num_threads
// fixed values
int num_blocks = ( (A.num_rows+A.blocksize-1)/A.blocksize);
int num_threads = A.alignment*A.blocksize;
int real_row_length = ((int)(A.max_nnz_row+A.alignment-1)/A.alignment)
*A.alignment;
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 && num_threads > 256 )
printf("error: too much shared memory requested.\n");
int dimgrid1 = sqrt(num_blocks);
int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1;
dim3 gridellrt( dimgrid1, dimgrid2, 1);
int Mellrt = A.alignment * A.blocksize * sizeof( magmaFloatComplex );
// printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms);
if( A.alignment == 32 ){
magma_ccgmerge_spmvellpackrt_kernel_32
<<< gridellrt, num_threads , Mellrt, magma_stream >>>
( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else if( A.alignment == 16 ){
magma_ccgmerge_spmvellpackrt_kernel_16
<<< gridellrt, num_threads , Mellrt, magma_stream >>>
( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else if( A.alignment == 8 ){
magma_ccgmerge_spmvellpackrt_kernel_8
<<< gridellrt, num_threads , Mellrt, magma_stream >>>
( A.num_rows, A.val, A.col, A.row, d_d, d_z, d1,
A.alignment, real_row_length );
}
else{
printf("error: alignment %d not supported.\n", A.alignment);
exit(-1);
}
// in case of using ELLRT, we can't efficiently merge the
// dot product and the first reduction loop into the SpMV kernel
// as the SpMV grid would result in low occupancy.
magma_ccgmerge_spmvellpackrt_kernel2<<<Gs, Bs, Ms, magma_stream >>>
( A.num_rows, d_z, d_d, d1 );
}
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, A.num_rows, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+4, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_ccg_rhokernel<<<Gs2, Bs2, 0>>>( skp );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
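// Minimal usage sketch for the wrapper above (illustrative, assuming A, d_d,
// d_z and skp have been set up elsewhere; the helper name is hypothetical and
// the workspace sizing simply uses one entry per row, which is more than the
// one-entry-per-block that the reduction needs):
static void magma_ccgmerge_spmv1_usage_sketch( magma_c_sparse_matrix A,
                                               magmaFloatComplex *d_d,
                                               magmaFloatComplex *d_z,
                                               magmaFloatComplex *skp )
{
    magmaFloatComplex *d1 = NULL, *d2 = NULL;   // ping-pong reduction workspaces
    magma_cmalloc( &d1, A.num_rows );
    magma_cmalloc( &d2, A.num_rows );
    // z = A*d; on return skp[4] holds d'z and skp[3] holds rho (device side)
    magma_ccgmerge_spmv1( A, d1, d2, d_d, d_z, skp );
    magma_free( d1 );
    magma_free( d2 );
}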
// updates x and r and computes the first part of the dot product r*r
__global__ void
magma_ccgmerge_xrbeta_kernel(
int n,
magmaFloatComplex *x,
magmaFloatComplex *r,
magmaFloatComplex *d,
magmaFloatComplex *z,
magmaFloatComplex *skp,
magmaFloatComplex *vtmp
){
extern __shared__ magmaFloatComplex temp[];
int Idx = threadIdx.x;
int i = blockIdx.x * blockDim.x + Idx;
magmaFloatComplex rho = skp[3];
magmaFloatComplex mrho = MAGMA_C_MAKE( -1.0, 0.0)*rho;
temp[ Idx ] = MAGMA_C_MAKE( 0.0, 0.0);
if( i<n ){
x[i] += rho * d[i] ;
r[i] += mrho * z[i];
temp[ Idx ] = r[i] * r[i];
}
__syncthreads();
if ( Idx < 128 ){
temp[ Idx ] += temp[ Idx + 128 ];
}
__syncthreads();
if ( Idx < 64 ){
temp[ Idx ] += temp[ Idx + 64 ];
}
__syncthreads();
#if defined(PRECISION_z) || defined(PRECISION_c)
if( Idx < 32 ){
temp[ Idx ] += temp[ Idx + 32 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 16 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 8 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 4 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 2 ];__syncthreads();
temp[ Idx ] += temp[ Idx + 1 ];__syncthreads();
}
#endif
#if defined(PRECISION_d)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
#if defined(PRECISION_s)
if( Idx < 32 ){
volatile float *temp2 = temp;
temp2[ Idx ] += temp2[ Idx + 32 ];
temp2[ Idx ] += temp2[ Idx + 16 ];
temp2[ Idx ] += temp2[ Idx + 8 ];
temp2[ Idx ] += temp2[ Idx + 4 ];
temp2[ Idx ] += temp2[ Idx + 2 ];
temp2[ Idx ] += temp2[ Idx + 1 ];
}
#endif
if ( Idx == 0 ){
vtmp[ blockIdx.x ] = temp[ 0 ];
}
}
// kernel to handle scalars
__global__ void //alpha = beta / gamma
magma_ccg_alphabetakernel(
magmaFloatComplex *skp ){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i==0 ){
magmaFloatComplex tmp1 = skp[1];
skp[0] = tmp1/skp[2];
//printf("beta=%e\n", MAGMA_C_REAL(tmp1));
}
}
// update search Krylov vector d
__global__ void
magma_ccg_d_kernel(
int n,
magmaFloatComplex *skp,
magmaFloatComplex *r,
magmaFloatComplex *d
){
int i = blockIdx.x * blockDim.x + threadIdx.x;
magmaFloatComplex alpha = skp[0];
if( i<n ){
d[i] = r[i] + alpha * d[i];
}
}
/**
Purpose
-------
    Merges the update of r and x with the dot product and then performs
    the update of the Krylov vector d
Arguments
---------
@param
n int
dimension n
@param
d1 magmaFloatComplex*
temporary vector
@param
d2 magmaFloatComplex*
temporary vector
@param
d_x magmaFloatComplex*
input vector x
@param
d_r magmaFloatComplex*
input/output vector r
@param
d_d magmaFloatComplex*
input vector d
@param
d_z magmaFloatComplex*
input vector z
@param
skp magmaFloatComplex*
array for parameters
@ingroup magmasparse_csygpuk
********************************************************************/
extern "C" magma_int_t
magma_ccgmerge_xrbeta(
int n,
magmaFloatComplex *d1,
magmaFloatComplex *d2,
magmaFloatComplex *d_x,
magmaFloatComplex *d_r,
magmaFloatComplex *d_d,
magmaFloatComplex *d_z,
magmaFloatComplex *skp ){
int local_block_size=256;
dim3 Bs( local_block_size );
dim3 Gs( (n+local_block_size-1)/local_block_size );
dim3 Gs_next;
int Ms = 2*local_block_size * sizeof( magmaFloatComplex );
magmaFloatComplex *aux1 = d1, *aux2 = d2;
int b = 1;
magma_ccgmerge_xrbeta_kernel<<<Gs, Bs, Ms>>>
( n, d_x, d_r, d_d, d_z, skp, d1);
while( Gs.x > 1 ){
Gs_next.x = ( Gs.x+Bs.x-1 )/ Bs.x ;
if( Gs_next.x == 1 ) Gs_next.x = 2;
magma_ccgreduce_kernel_spmv1<<< Gs_next.x/2, Bs.x/2, Ms/2 >>>
( Gs.x, n, aux1, aux2 );
Gs_next.x = Gs_next.x /2;
Gs.x = Gs_next.x;
b = 1 - b;
if( b ){ aux1 = d1; aux2 = d2; }
else { aux2 = d1; aux1 = d2; }
}
magma_ccopyvector( 1, aux1, 1, skp+1, 1 );
dim3 Bs2( 2 );
dim3 Gs2( 1 );
magma_ccg_alphabetakernel<<<Gs2, Bs2, 0>>>( skp );
dim3 Bs3( local_block_size );
dim3 Gs3( (n+local_block_size-1)/local_block_size );
magma_ccg_d_kernel<<<Gs3, Bs3, 0>>>( n, skp, d_r, d_d );
return MAGMA_SUCCESS;
}
/* -------------------------------------------------------------------------- */
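// Hedged end-to-end sketch: one iteration of the merged CG described in the
// reference at the top of this file would chain the two wrappers defined here.
// The helper name, the loop control and the convergence test are assumed to
// live in the caller and are not shown; this is an illustration, not the MAGMA
// solver itself.
static void magma_ccgmerge_iteration_sketch( magma_c_sparse_matrix A, int n,
                                             magmaFloatComplex *d1, magmaFloatComplex *d2,
                                             magmaFloatComplex *d_x, magmaFloatComplex *d_r,
                                             magmaFloatComplex *d_d, magmaFloatComplex *d_z,
                                             magmaFloatComplex *skp )
{
    // z = A*d, skp[4] = d'z, skp[3] = rho = r'r / d'z
    magma_ccgmerge_spmv1( A, d1, d2, d_d, d_z, skp );
    // x += rho*d, r -= rho*z, skp[1] = r'r, skp[0] = beta, d = r + beta*d
    magma_ccgmerge_xrbeta( n, d1, d2, d_x, d_r, d_d, d_z, skp );
}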
|
c5ce2a5e0899009cf58219d8f46f4d0b77bdb9af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;
uint32_t bfSize;
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
int i,j,k;
i=blockIdx.x*128+threadIdx.x;
double kankaku,hatyou,goukei,pi;
hatyou=0.633;
kankaku=10.5;
pi=3.14159265;
goukei=2.0*pi*kankaku/hatyou;
double dx,dy,tmp;
for(j=0;j<WID;j++){
tmp=0.0;
for(k=0;k<*tensuu_d;k++){
dx=(double)(x_d[k]-j);
dy=(double)(y_d[k]-i);
tmp=tmp+cos(goukei*sqrt(dx*dx+dy*dy+z_d[k]*z_d[k]));
}
img_buf_d[i*WID+j] = tmp;
}
}
int main(){
int tensuu;
BITMAPFILEHEADER BmpFileHeader;
BITMAPINFOHEADER BmpInfoHeader;
RGBQUAD RGBQuad[256];
FILE *fp;
int i,j;
BmpFileHeader.bfType =19778;
BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
BmpFileHeader.bfReserved1 =0;
BmpFileHeader.bfReserved2 =0;
BmpFileHeader.bf0ffBits =14+40+1024;
BmpInfoHeader.biSize =40;
BmpInfoHeader.biWidth =WID;
BmpInfoHeader.biHeight =HEI;
BmpInfoHeader.biPlanes =1;
BmpInfoHeader.biBitCount =8; //256 gray levels
BmpInfoHeader.biCompression =0L;
BmpInfoHeader.biSizeImage =0L;
BmpInfoHeader.biXPelsPerMeter =0L;
BmpInfoHeader.biYPelsPerMeter =0L;
BmpInfoHeader.biCirUsed =0L;
BmpInfoHeader.biCirImportant =0L;
for(i=0;i<256;i++){
RGBQuad[i].rgbBlue =i;
RGBQuad[i].rgbGreen =i;
RGBQuad[i].rgbRed =i;
RGBQuad[i].rgbReserved =0;
}
char filename[20]={};
printf(" : ");
scanf("%s",filename);
fp=fopen(filename,"rb");
if(fp==NULL){
printf("File open error\n");
return 1;
}
fread(&tensuu,sizeof(int),1,fp);
printf("%d\n",tensuu);
int x[tensuu];
int y[tensuu];
double z[tensuu];
int *tensuu_d;
hipMalloc((void**)&tensuu_d,sizeof(int));
hipMemcpy(tensuu_d,&tensuu,sizeof(int),hipMemcpyHostToDevice);
int *x_d,*y_d;
double *z_d;
double *img_buf_d;
dim3 blocks(8,1,1);
dim3 threads(128,1,1);
int x_buf,y_buf,z_buf;
for(i=0;i<tensuu;i++){
fread(&x_buf,sizeof(int),1,fp);
fread(&y_buf,sizeof(int),1,fp);
fread(&z_buf,sizeof(int),1,fp);
x[i]=x_buf*40+512;
y[i]=y_buf*40+512;
z[i]=((double)z_buf)*40+100000.0;
}
fclose(fp);
hipMalloc((void**)&x_d,tensuu*sizeof(int));
hipMalloc((void**)&y_d,tensuu*sizeof(int));
hipMalloc((void**)&z_d,tensuu*sizeof(double));
hipMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));
double *img_buf;
img_buf=(double *)malloc(sizeof(double)*WID*HEI);
for(i=0;i<WID*HEI;i++){
img_buf[i]=0.0;
}
hipMemcpy(x_d,x,tensuu*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(y_d,y,tensuu*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(z_d,z,tensuu*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( distance_gpu), dim3(blocks),dim3(threads), 0, 0, x_d,y_d,z_d,img_buf_d,tensuu_d);
hipMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),hipMemcpyDeviceToHost);
double min,max,mid;
min=img_buf[0];
max=img_buf[0];
for(i=0;i<HEI;i++){
for(j=0;j<WID;j++){
if(min>img_buf[i*WID+j]){
min=img_buf[i*WID+j];
}
if(max<img_buf[i*WID+j]){
max=img_buf[i*WID+j];
}
}
}
mid=0.5F*(min+max);
printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
unsigned char *img;
img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
for(i=0;i<WID*HEI;i++){
if(img_buf[i]<mid){
img[i]=0;
}
if(img_buf[i]>mid){
img[i]=255;
}
}
FILE *fp1;
fp1=fopen("cgh_root_gpu.bmp","wb");
if(fp1==NULL){
printf("File open error\n");
return 1;
}
fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
free(img);
free(img_buf);
fclose(fp1);
hipFree(tensuu_d);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
hipFree(img_buf_d);
return 0;
}
|
c5ce2a5e0899009cf58219d8f46f4d0b77bdb9af.cu
|
#include <stdio.h>
#include <math.h>
#include<stdint.h>
#include<stdlib.h>
#include<cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER
{
unsigned short bfType;
uint32_t bfSize;
unsigned short bfReserved1;
unsigned short bfReserved2;
uint32_t bf0ffBits;
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER
{
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
unsigned short biPlanes;
unsigned short biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biCirUsed;
uint32_t biCirImportant;
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD
{
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
typedef struct tagBITMAPINFO
{
BITMAPINFOHEADER bmiHeader;
RGBQUAD bmiColors[1];
}BITMAPINFO;
__global__ void distance_gpu(int *x_d,int *y_d,double *z_d,double *img_buf_d,int *tensuu_d)
{
int i,j,k;
i=blockIdx.x*128+threadIdx.x;
double kankaku,hatyou,goukei,pi;
hatyou=0.633;
kankaku=10.5;
pi=3.14159265;
goukei=2.0*pi*kankaku/hatyou;
double dx,dy,tmp;
for(j=0;j<WID;j++){
tmp=0.0;
for(k=0;k<*tensuu_d;k++){
dx=(double)(x_d[k]-j);
dy=(double)(y_d[k]-i);
tmp=tmp+cos(goukei*sqrt(dx*dx+dy*dy+z_d[k]*z_d[k]));
}
img_buf_d[i*WID+j] = tmp;
}
}
int main(){
int tensuu;
BITMAPFILEHEADER BmpFileHeader;
BITMAPINFOHEADER BmpInfoHeader;
RGBQUAD RGBQuad[256];
FILE *fp;
int i,j;
BmpFileHeader.bfType =19778;
BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
BmpFileHeader.bfReserved1 =0;
BmpFileHeader.bfReserved2 =0;
BmpFileHeader.bf0ffBits =14+40+1024;
BmpInfoHeader.biSize =40;
BmpInfoHeader.biWidth =WID;
BmpInfoHeader.biHeight =HEI;
BmpInfoHeader.biPlanes =1;
BmpInfoHeader.biBitCount =8; //256 gray levels
BmpInfoHeader.biCompression =0L;
BmpInfoHeader.biSizeImage =0L;
BmpInfoHeader.biXPelsPerMeter =0L;
BmpInfoHeader.biYPelsPerMeter =0L;
BmpInfoHeader.biCirUsed =0L;
BmpInfoHeader.biCirImportant =0L;
for(i=0;i<256;i++){
RGBQuad[i].rgbBlue =i;
RGBQuad[i].rgbGreen =i;
RGBQuad[i].rgbRed =i;
RGBQuad[i].rgbReserved =0;
}
char filename[20]={};
printf("ファイル名を入力してください : ");
scanf("%s",filename);
fp=fopen(filename,"rb");
if(fp==NULL){
printf("File open error\n");
return 1;
}
fread(&tensuu,sizeof(int),1,fp);
printf("物体点数は%dです\n",tensuu);
int x[tensuu];
int y[tensuu];
double z[tensuu];
int *tensuu_d;
cudaMalloc((void**)&tensuu_d,sizeof(int));
cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice);
int *x_d,*y_d;
double *z_d;
double *img_buf_d;
dim3 blocks(8,1,1);
dim3 threads(128,1,1);
int x_buf,y_buf,z_buf;
for(i=0;i<tensuu;i++){
fread(&x_buf,sizeof(int),1,fp);
fread(&y_buf,sizeof(int),1,fp);
fread(&z_buf,sizeof(int),1,fp);
x[i]=x_buf*40+512;
y[i]=y_buf*40+512;
z[i]=((double)z_buf)*40+100000.0;
}
fclose(fp);
cudaMalloc((void**)&x_d,tensuu*sizeof(int));
cudaMalloc((void**)&y_d,tensuu*sizeof(int));
cudaMalloc((void**)&z_d,tensuu*sizeof(double));
cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(double));
double *img_buf;
img_buf=(double *)malloc(sizeof(double)*WID*HEI);
for(i=0;i<WID*HEI;i++){
img_buf[i]=0.0;
}
cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(z_d,z,tensuu*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(double),cudaMemcpyHostToDevice);
distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d);
cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(double),cudaMemcpyDeviceToHost);
double min,max,mid;
min=img_buf[0];
max=img_buf[0];
for(i=0;i<HEI;i++){
for(j=0;j<WID;j++){
if(min>img_buf[i*WID+j]){
min=img_buf[i*WID+j];
}
if(max<img_buf[i*WID+j]){
max=img_buf[i*WID+j];
}
}
}
mid=0.5F*(min+max);
printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
unsigned char *img;
img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
for(i=0;i<WID*HEI;i++){
if(img_buf[i]<mid){
img[i]=0;
}
if(img_buf[i]>mid){
img[i]=255;
}
}
FILE *fp1;
fp1=fopen("cgh_root_gpu.bmp","wb");
if(fp1==NULL){
printf("File open error\n");
return 1;
}
fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
free(img);
free(img_buf);
fclose(fp1);
cudaFree(tensuu_d);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cudaFree(img_buf_d);
return 0;
}
|
619e13db3eca614a9226d7e6a76b2baa338620f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#ifndef HAMC_SCRATCH_H
#define HAMC_SCRATCH_H
#include <wb.h>
#include <bits/getopt_core.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime_api.h>
#include <hip/driver_types.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <stdint.h>
#include <sys/time.h>
#include "../../src/hamc/hamc_cpu_code.c"
#include "../../src/hamc/LU_inverse_plain.cu"
void run_find_max_kernel(bin_matrix A)
{
HAMC_DATA_TYPE_t *deviceA;
hipMalloc((void **)
&deviceA, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t));
hipMemcpy(deviceA, A->data, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t),
hipMemcpyHostToDevice);
int *deviceIPIV;
hipMalloc((void **) &deviceIPIV, A->rows * sizeof(int));
hipLaunchKernelGGL(( GF2_LU_decompose_find_max_row), dim3(1),dim3(1), 0, 0, deviceA, deviceIPIV, 0, 0);
hipError_t cudaerr = hipDeviceSynchronize();
if (cudaerr != hipSuccess)
printf("kernel launch failed with error \"%s\".\n",
hipGetErrorString(cudaerr));
    hipFree(deviceIPIV);
    hipFree(deviceA);
    return;
}
int main(int argc, char *argv[]){
bool verbose = true;
int n = 2;
int p = 512;
int t = 10;
int w = 30;
int seed = 10;
int opt;
while ((opt = getopt(argc, argv, "n:")) != -1){
switch(opt){
case 'n':
p = atoi(optarg);
break;
}
}
bin_matrix invertible_matrix;
mdpc code;
code = qc_mdpc_init_cpu(n, p, t, w, seed);
invertible_matrix = make_matrix_cpu(code->p, code->p, splice_cpu(code->row, (code->n0 - 1) * code->p, code->n), 1);
if (verbose) {
printf("Input matrix size: %dx%d\n",
invertible_matrix->rows, invertible_matrix->cols);
}
run_find_max_kernel(invertible_matrix);
if (verbose) printf("Freeing allocated memory...\n");
if (invertible_matrix != NULL) free(invertible_matrix);
return 0;
}
#endif /* HAMC_SCRATCH_H */
|
619e13db3eca614a9226d7e6a76b2baa338620f3.cu
|
#ifndef HAMC_SCRATCH_H
#define HAMC_SCRATCH_H
#include <wb.h>
#include <bits/getopt_core.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime_api.h>
#include <driver_types.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <string>
#include <stdint.h>
#include <sys/time.h>
#include "../../src/hamc/hamc_cpu_code.c"
#include "../../src/hamc/LU_inverse_plain.cu"
void run_find_max_kernel(bin_matrix A)
{
HAMC_DATA_TYPE_t *deviceA;
cudaMalloc((void **)
&deviceA, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t));
cudaMemcpy(deviceA, A->data, A->rows * A->cols * sizeof(HAMC_DATA_TYPE_t),
cudaMemcpyHostToDevice);
int *deviceIPIV;
cudaMalloc((void **) &deviceIPIV, A->rows * sizeof(int));
GF2_LU_decompose_find_max_row<<<1,1>>>(deviceA, deviceIPIV, 0, 0);
cudaError_t cudaerr = cudaDeviceSynchronize();
if (cudaerr != cudaSuccess)
printf("kernel launch failed with error \"%s\".\n",
cudaGetErrorString(cudaerr));
    cudaFree(deviceIPIV);
    cudaFree(deviceA);
    return;
}
int main(int argc, char *argv[]){
bool verbose = true;
int n = 2;
int p = 512;
int t = 10;
int w = 30;
int seed = 10;
int opt;
while ((opt = getopt(argc, argv, "n:")) != -1){
switch(opt){
case 'n':
p = atoi(optarg);
break;
}
}
bin_matrix invertible_matrix;
mdpc code;
code = qc_mdpc_init_cpu(n, p, t, w, seed);
invertible_matrix = make_matrix_cpu(code->p, code->p, splice_cpu(code->row, (code->n0 - 1) * code->p, code->n), 1);
if (verbose) {
printf("Input matrix size: %dx%d\n",
invertible_matrix->rows, invertible_matrix->cols);
}
run_find_max_kernel(invertible_matrix);
if (verbose) printf("Freeing allocated memory...\n");
if (invertible_matrix != NULL) free(invertible_matrix);
return 0;
}
#endif /* HAMC_SCRATCH_H */
|
0ac065cb5be64a339687ec5f4d5952962018635b.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2013 Diana-Andreea Popescu, EPFL & CERN, Switzerland. All rights reserved.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include "Comp.cuh"
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
inline
void checkCuda(hipError_t result)
{
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
hipGetErrorString(result));
exit(EXIT_FAILURE);
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
computeResultTersmArbitrarySized(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
ekey = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
}
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate2(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
//printf("%d %d %d %d\n", k, Cexp[k], Aes[tx + k * BLOCK_SIZE_X], Bes[ty + k * BLOCK_SIZE_Y]);
sum += Cexp[k];
}
stencil[c] = order - sum;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
sum += Cexp[k];
}
if (sum <= order) {
stencil[c] = 1;
}
else stencil[c] = 0;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate_key(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, double transform)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty] * transform;
ekey = 0;
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
sum += Cexp[k];
}
if (sum <= order)
stencil[c] = 1;
else stencil[c] = 0;
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate_key2(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, double transform)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty] * transform;
ekey = 0;
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
sum += Cexp[k];
}
stencil[c] = order - sum;
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int NVARS, int MAX_EXP> __global__ void
get_keys_from_exponents(unsigned long long *exp_keys, int *exp_A, unsigned int nA)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tbx = tx + bx * blockDim.x;
int offset = blockDim.x * gridDim.x;
unsigned long long ekey = 0;
while (tbx < nA) {
ekey = 0;
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
ekey = MAX_EXP * ekey + exp_A[tbx + k * nA];
exp_keys[tbx] = ekey;
}
//update index
tbx += offset;
}
}
template <int NVARS, int MAX_EXP> __global__ void
get_exponents_from_key(int *exp_C, unsigned long long *exp_keys, unsigned int nC)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tbx = tx + bx * blockDim.x;
int offset = blockDim.x * gridDim.x;
unsigned long long key = 0, kd = 0;
while (tbx < nC) {
key = exp_keys[tbx];
// printf("%llu \n", key);
#pragma unroll
for (int k = NVARS - 1; k >= 0; k--) {
kd = key/MAX_EXP;
exp_C[tbx + k * nC] = key - kd * MAX_EXP;
key = kd;
//exp_C[tbx + k * nC] = key % MAX_EXP;
//key /= MAX_EXP;
}
tbx += offset;
}
}
/*void initPol(unsigned int *exps, unsigned int dim, double *coeffs, unsigned int nvars)
{
for (unsigned int i = 0; i < dim; ++i)
{
for (unsigned int k = 0; k < nvars; ++k)
exps[i + k * dim] = rand() % 20;
coeffs[i] = 1;
}
// for (unsigned int i = 0; i < dim * nvars; ++i)
// printf("%d ", exps[i]);
}*/
void multiply_truncate(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, hipStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
hipLaunchKernelGGL(( multiply_pols_truncate<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E>) , dim3(grid), dim3(threads), 0, stream , exp_C, exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil);
getLastCudaError("multiply truncate execution FAILED\n");
}
void multiply_truncate2(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, hipStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
hipLaunchKernelGGL(( multiply_pols_truncate2<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E>) , dim3(grid), dim3(threads), 0, stream , exp_C, exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil);
getLastCudaError("multiply truncate execution FAILED\n");
}
void multiply_truncate_key(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, double coef, hipStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
hipLaunchKernelGGL(( multiply_pols_truncate_key<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E>) , dim3(grid), dim3(threads), 0, stream , exp_C, exp_keys,exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil, coef);
getLastCudaError("multiply truncate key execution FAILED\n");
}
void multiply_truncate_key2(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, double coef, hipStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
hipLaunchKernelGGL(( multiply_pols_truncate_key2<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E>) , dim3(grid), dim3(threads), 0, stream , exp_C, exp_keys,exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil, coef);
getLastCudaError("multiply truncate key execution FAILED\n");
}
void get_exponents(int *exp_C, unsigned long long *exp_keys, unsigned int nC){
dim3 threads(THREADBLOCK_SIZE);
int x1 = (nC % threads.x == 0) ? 0 : 1;
dim3 grid(nC/THREADBLOCK_SIZE + x1);
hipLaunchKernelGGL(( get_exponents_from_key<NRVARS, MAX_E>), dim3(grid), dim3(threads), 0, 0, exp_C, exp_keys, nC);
}
void get_keys(unsigned long long *exp_keys, int* exp_C, unsigned int nC, hipStream_t stream){
dim3 threads(THREADBLOCK_SIZE);
int x1 = (nC % threads.x == 0) ? 0 : 1;
dim3 grid(nC/THREADBLOCK_SIZE + x1);
hipLaunchKernelGGL(( get_keys_from_exponents<NRVARS, MAX_E>), dim3(grid), dim3(threads), 0, stream, exp_keys, exp_C, nC);
}
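// Hedged post-processing sketch (an assumption based on the thrust headers
// included above, not an API provided by this file): after the pairwise
// multiplication, terms sharing the same packed exponent key would typically be
// combined on the device by sorting on the key and reducing the coefficients.
// The function name and the caller-provided output buffers are illustrative only.
unsigned int combine_like_terms_sketch(unsigned long long *exp_keys, double *coeff_C,
                                       unsigned long long *out_keys, double *out_coeffs,
                                       unsigned int nC)
{
    thrust::device_ptr<unsigned long long> keys(exp_keys), okeys(out_keys);
    thrust::device_ptr<double> coeffs(coeff_C), ocoeffs(out_coeffs);
    // bring equal exponent keys next to each other, carrying the coefficients along
    thrust::sort_by_key(keys, keys + nC, coeffs);
    // sum the coefficients of every run of equal keys into the output buffers
    thrust::pair< thrust::device_ptr<unsigned long long>,
                  thrust::device_ptr<double> > new_end =
        thrust::reduce_by_key(keys, keys + nC, coeffs, okeys, ocoeffs);
    // number of distinct terms in the (already truncated) product
    return (unsigned int)(new_end.first - okeys);
}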
|
0ac065cb5be64a339687ec5f4d5952962018635b.cu
|
/**
* Copyright 2013 Diana-Andreea Popescu, EPFL & CERN, Switzerland. All rights reserved.
*
*/
// System includes
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include "Comp.cuh"
// CUDA runtime
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#include <helper_cuda.h>
// Helper functions and utilities to work with CUDA
#include <helper_functions.h>
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#define printf(f, ...) ((void)(f, __VA_ARGS__),0)
#endif
#include <thrust/device_ptr.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/remove.h>
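// Minimal error-checking helper: print the CUDA error string and abort if
// a runtime call did not return cudaSuccess.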
inline
void checkCuda(cudaError_t result)
{
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n",
cudaGetErrorString(result));
exit(EXIT_FAILURE);
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
computeResultTersmArbitrarySized(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
ekey = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
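            // Pack the NVARS exponents of the product term into a single
            // 64-bit key, base MAX_EXP (first variable in the most
            // significant digit), so equal exponent vectors produce equal
            // keys (presumably consumed by the later thrust sort/reduce by key).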
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
}
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate2(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
//printf("%d %d %d %d\n", k, Cexp[k], Aes[tx + k * BLOCK_SIZE_X], Bes[ty + k * BLOCK_SIZE_Y]);
sum += Cexp[k];
}
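            // Store the truncation slack (order minus total degree); a
            // negative value marks a term above the truncation order that
            // can be filtered out afterwards (e.g. with thrust::remove_if).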
stencil[c] = order - sum;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty];
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
sum += Cexp[k];
}
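            // Flag terms whose total degree is within the truncation order
            // (1 = keep, 0 = drop), so the stencil can drive a later
            // stream-compaction pass.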
if (sum <= order) {
stencil[c] = 1;
}
else stencil[c] = 0;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate_key(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, double transform)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
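            // (scaled by the extra 'transform' factor passed in from the host)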
Ccoeff = Acs[tx] * Bcs[ty] * transform;
ekey = 0;
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
sum += Cexp[k];
}
if (sum <= order)
stencil[c] = 1;
else stencil[c] = 0;
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
template <int BLOCK_SIZE_X, int BLOCK_SIZE_Y, int NVARS, int MAX_EXP> __global__ void
multiply_pols_truncate_key2(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, double transform)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
int tbx0 = tx + bx * BLOCK_SIZE_X;
int tby0 = ty + by * BLOCK_SIZE_Y;
int tbx = tbx0;
int tby = tby0;
int offsetx = BLOCK_SIZE_X * gridDim.x;
int offsety = BLOCK_SIZE_Y * gridDim.y;
__shared__ int Aes[BLOCK_SIZE_X * NVARS];
__shared__ double Acs[BLOCK_SIZE_X];
__shared__ int Bes[BLOCK_SIZE_Y * NVARS];
__shared__ double Bcs[BLOCK_SIZE_Y];
int Cexp[NVARS];
unsigned long long ekey = 0;
double Ccoeff = 0;
int c = 0;
int sum = 0;
while (tby < nB) {
while (tbx < nA) {
Acs[tx] = coeff_A[tbx];
Bcs[ty] = coeff_B[tby];
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
Aes[tx + k * BLOCK_SIZE_X] = exp_A[tbx + k * nA];
Bes[ty + k * BLOCK_SIZE_Y] = exp_B[tby + k * nB];
}
// Ccoeff is used to store the coefficient of the term product
// that is computed by the thread
Ccoeff = Acs[tx] * Bcs[ty] * transform;
ekey = 0;
sum = 0;
c = nA * tby + tbx;
coeff_C[c] = Ccoeff;
#pragma unroll
for (int k = 0; k < NVARS; ++k)
{
Cexp[k] = Aes[tx + k * BLOCK_SIZE_X] + Bes[ty + k * BLOCK_SIZE_Y];
exp_C[c + k * nC] = Cexp[k];
ekey = MAX_EXP * ekey + Cexp[k];
sum += Cexp[k];
}
stencil[c] = order - sum;
exp_keys[c] = ekey;
//update index
tbx += offsetx;
}
//update index
tby += offsety;
//reset
tbx = tbx0;
}
}
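// Recompute the packed base-MAX_EXP key of each term directly from its
// exponents (stored column-major with stride nA, one row per variable).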
template <int NVARS, int MAX_EXP> __global__ void
get_keys_from_exponents(unsigned long long *exp_keys, int *exp_A, unsigned int nA)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tbx = tx + bx * blockDim.x;
int offset = blockDim.x * gridDim.x;
unsigned long long ekey = 0;
while (tbx < nA) {
ekey = 0;
#pragma unroll
for (int k = 0; k < NVARS; ++k) {
ekey = MAX_EXP * ekey + exp_A[tbx + k * nA];
exp_keys[tbx] = ekey;
}
//update index
tbx += offset;
}
}
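// Inverse of get_keys_from_exponents: unpack a base-MAX_EXP key into the
// NVARS individual exponents, recovering the last variable first.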
template <int NVARS, int MAX_EXP> __global__ void
get_exponents_from_key(int *exp_C, unsigned long long *exp_keys, unsigned int nC)
{
// Block index
int bx = blockIdx.x;
// Thread index
int tx = threadIdx.x;
int tbx = tx + bx * blockDim.x;
int offset = blockDim.x * gridDim.x;
unsigned long long key = 0, kd = 0;
while (tbx < nC) {
key = exp_keys[tbx];
// printf("%llu \n", key);
#pragma unroll
for (int k = NVARS - 1; k >= 0; k--) {
kd = key/MAX_EXP;
exp_C[tbx + k * nC] = key - kd * MAX_EXP;
key = kd;
//exp_C[tbx + k * nC] = key % MAX_EXP;
//key /= MAX_EXP;
}
tbx += offset;
}
}
/*void initPol(unsigned int *exps, unsigned int dim, double *coeffs, unsigned int nvars)
{
for (unsigned int i = 0; i < dim; ++i)
{
for (unsigned int k = 0; k < nvars; ++k)
exps[i + k * dim] = rand() % 20;
coeffs[i] = 1;
}
// for (unsigned int i = 0; i < dim * nvars; ++i)
// printf("%d ", exps[i]);
}*/
void multiply_truncate(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, cudaStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
multiply_pols_truncate<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E> <<< grid, threads, 0, stream >>>(exp_C, exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil);
getLastCudaError("multiply truncate execution FAILED\n");
}
void multiply_truncate2(int *exp_C, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, cudaStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
multiply_pols_truncate2<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E> <<< grid, threads, 0, stream >>>(exp_C, exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil);
getLastCudaError("multiply truncate execution FAILED\n");
}
void multiply_truncate_key(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, unsigned int* stencil, double coef, cudaStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
multiply_pols_truncate_key<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E> <<< grid, threads, 0, stream >>>(exp_C, exp_keys,exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil, coef);
getLastCudaError("multiply truncate key execution FAILED\n");
}
void multiply_truncate_key2(int *exp_C, unsigned long long *exp_keys, int *exp_A, int *exp_B,
double *coeff_C, double *coeff_A, double *coeff_B,
unsigned int nC, unsigned int nA, unsigned int nB,
int order, int* stencil, double coef, cudaStream_t stream){
dim3 threads(BL_SIZE_X, BL_SIZE_Y);
int x1 = (nA % threads.x == 0) ? 0 : 1;
int y1 = (nB % threads.y == 0) ? 0 : 1;
dim3 grid(nA/BL_SIZE_X + x1, nB/BL_SIZE_Y + y1);
multiply_pols_truncate_key2<BL_SIZE_X, BL_SIZE_Y, NRVARS, MAX_E> <<< grid, threads, 0, stream >>>(exp_C, exp_keys,exp_A, exp_B, coeff_C, coeff_A, coeff_B, nC, nA, nB, order, stencil, coef);
getLastCudaError("multiply truncate key execution FAILED\n");
}
void get_exponents(int *exp_C, unsigned long long *exp_keys, unsigned int nC){
dim3 threads(THREADBLOCK_SIZE);
int x1 = (nC % threads.x == 0) ? 0 : 1;
dim3 grid(nC/THREADBLOCK_SIZE + x1);
get_exponents_from_key<NRVARS, MAX_E><<<grid, threads>>>(exp_C, exp_keys, nC);
}
void get_keys(unsigned long long *exp_keys, int* exp_C, unsigned int nC, cudaStream_t stream){
dim3 threads(THREADBLOCK_SIZE);
int x1 = (nC % threads.x == 0) ? 0 : 1;
dim3 grid(nC/THREADBLOCK_SIZE + x1);
get_keys_from_exponents<NRVARS, MAX_E><<<grid, threads, 0, stream>>>(exp_keys, exp_C, nC);
}
|
118a160f2b6dda7eb75fb85c5a43c84e255e079b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/operator.h"
#include <float.h>
#include <string.h>
// channel-wise softmax transform: bottom3d (N x C x D) -> top3d (N x C x D)
// top[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// 1. to avoid exp(...) overflowing to infinity, subtract the max
// exp(x_c) / sum_c(exp(x_c))
// = ( exp(x_c) / exp(x_max) ) / sum_c( exp(x_c) / exp(x_max) )
// = exp(x_c - x_max) / sum_c(exp(x_c - x_max))
// 2. thus, the transform consists of 5 steps:
// a. max[n][d] = max_c(bottom[n][c][d])
// b. sub[n][c][d] = bottom[n][c][d] - max[n][d]
// c. exp[n][c][d] = exp(sub[n][c][d])
// d. sum[n][d] = sum_c(exp[n][c][d])
// e. top[n][c][d] = exp[n][c][d] / sum[n][d]
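//    e.g. for C = 2 and x = (1, 3): max = 3, sub = (-2, 0),
//    exp = (0.135, 1), sum = 1.135, top = (0.119, 0.881)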
// --------------------------------------------------------------------------
// kernel code
// channel_max_{gpu, cpu}
// subtract_max_{gpu, cpu}
// exp_{gpu, cpu}
// channel_sum_{gpu, cpu}
// div_sum_{gpu, cpu}
// softmax_inplace
// --------------------------------------------------------------------------
// compute max2d[n][d] = max_c(data3d[n][c][d])
#ifdef GPU
__global__
static
void channel_max_gpu(const real data3d[], real max2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int max_size = N * D;
if (index < max_size) {
const int n = index / D;
const int d = index % D;
real maxval = -FLT_MAX;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
maxval = MAX(maxval, data3d[data_index]);
}
max2d[index] = maxval;
}
}
#else
static
void channel_max_cpu(const real data3d[], real max2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int d = 0; d < D; ++d) {
const long int max_index = n * D + d;
real maxval = -FLT_MAX;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
maxval = MAX(maxval, data3d[data_index]);
}
max2d[max_index] = maxval;
}
}
}
#endif
// in-place subtraction: data3d[n][c][d] = data3d[n][c][d] - max2d[n][d]
#ifdef GPU
__global__
static
void subtract_max_gpu(real data3d[], const real max2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int data_size = N * C * D;
if (index < data_size) {
const int n = index / C / D;
const int d = index % D;
const long int max_index = n * D + d;
data3d[index] -= max2d[max_index];
}
}
#else
static
void subtract_max_cpu(real data3d[], const real max2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
for (int d = 0; d < D; ++d) {
const long int data_index = (n * C + c) * D + d;
const long int max_index = n * D + d;
data3d[data_index] -= max2d[max_index];
} // endfor d
} // endfor c
} // endfor n
}
#endif
// in-place element-wise exp: data3d[n][c][d] = exp(data[n][c][d])
#ifdef GPU
__global__
static
void exp_gpu(real data[], const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
data[index] = exp(data[index]);
}
}
#else
static
void exp_cpu(real data[], const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
data[index] = exp(data[index]);
}
}
#endif
// compute sum2d[n][d] = sum_c(data3d[n][c][d])
#ifdef GPU
__global__
static
void channel_sum_gpu(const real data3d[], real sum2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int sum_size = N * D;
if (index < sum_size) {
const int n = index / D;
const int d = index % D;
real sumval = 0;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
sumval += data3d[data_index];
}
sum2d[index] = sumval;
}
}
#else
static
void channel_sum_cpu(const real data3d[], real sum2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int d = 0; d < D; ++d) {
const long int sum_index = n * D + d;
real sumval = 0;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
sumval += data3d[data_index];
}
sum2d[sum_index] = sumval;
}
}
}
#endif
// in-place division: data3d[n][c][d] = data3d[n][c][d] / sum2d[n][d]
#ifdef GPU
__global__
static
void div_sum_gpu(real data3d[], const real sum2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C * D) {
const int n = index / C / D;
const int d = index % D;
const long int sum_index = n * D + d;
data3d[index] /= sum2d[sum_index];
}
}
#else
static
void div_sum_cpu(real data3d[], const real sum2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
for (int d = 0; d < D; ++d) {
const long int data_index = (n * C + c) * D + d;
const long int sum_index = n * D + d;
data3d[data_index] /= sum2d[sum_index];
} // endfor d
} // endfor c
} // endfor n
}
#endif
// channel-wise in-place softmax transform
// bottom[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// bottom3d: N x C x D array
// temp_data: N * D array, temporary space for channel-wise sum or max
// e.g., temp_data[n][d] = sum_c(exp(bottom3d[n][c][d]))
static
void softmax_inplace(real bottom3d[], real temp_data[],
const int N, const int C, const int D)
{
// 1. max[n][d] = max_c(bottom[n][c][d])
{
#ifdef GPU
const long int num_threads = N * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( channel_max_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom3d, temp_data, N, C, D);
#else
channel_max_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 2. sub[n][c][d] = bottom[n][c][d] - max[n][d]
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( subtract_max_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom3d, temp_data, N, C, D);
#else
subtract_max_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 3. exp[n][c][d] = exp(sub[n][c][d])
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( exp_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom3d, num_threads);
#else
const long int data_size = N * C * D;
exp_cpu(bottom3d, data_size);
#endif
}
// 4. sum[n][d] = sum_c(exp[n][c][d])
{
#ifdef GPU
const long int num_threads = N * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( channel_sum_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom3d, temp_data, N, C, D);
#else
channel_sum_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 5. top[n][c][d] = exp[n][c][d] / sum[n][d]
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
hipLaunchKernelGGL(( div_sum_gpu), dim3(num_blocks), dim3(threads_per_block), 0, 0,
bottom3d, temp_data, N, C, D);
#else
div_sum_cpu(bottom3d, temp_data, N, C, D);
#endif
}
}
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
// channel-wise softmax transform: bottom3d (N x C x D) -> top3d (N x C x D)
// top[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// option->channel_axis: axis index to be considered as "channel"
// e.g., option->channel_axis = 0 if bottom = C x H x W tensor
// N = product of shape[0, ..., option->channel_axis-1]
// C = shape[option->channel_axis]
// D = product of shape[option->channel_axis+1, ..., ndim-1]
// temp_data: N * D array, temporary space for channel-wise sum or max
// e.g., temp_data[n][d] = sum_c(exp(bottom[n][c][d]))
static
void softmax_forward(const Tensor* const bottom,
Tensor* const top,
real temp_data[],
const LayerOption* const option)
{
  // copy bottom -> top, and then perform the in-place operation
if (bottom->data != top->data) {
const long int data_size = get_data_size(bottom);
#ifdef GPU
hipMemcpyAsync(top->data, bottom->data, data_size * sizeof(real),
hipMemcpyDeviceToDevice);
#else
memcpy(top->data, bottom->data, data_size * sizeof(real));
#endif
}
// perform in-place softmax operation
for (int n = 0; n < bottom->num_items; ++n) {
real* const p_top_item = top->data + bottom->start[n];
const int C = bottom->shape[n][option->channel_axis];
int N = 1, D = 1;
for (int i = 0; i < option->channel_axis; ++i) {
N *= bottom->shape[n][i];
}
for (int i = option->channel_axis + 1; i < bottom->ndim; ++i) {
D *= bottom->shape[n][i];
}
softmax_inplace(p_top_item, temp_data, N, C, D);
}
}
// --------------------------------------------------------------------------
// output shape calculator code
// --------------------------------------------------------------------------
static
void softmax_shape(const Tensor* const bottom,
Tensor* const top,
long int* const p_temp_space,
const LayerOption* const option)
{
// top shape = bottom shape
if (bottom != top) {
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
for (int n = 0; n < bottom->num_items; ++n) {
top->start[n] = bottom->start[n];
}
}
// temporary space for channel-wise sum or max: N * D
{
int ND_max = 0;
for (int n = 0; n < bottom->num_items; ++n) {
int N = 1, D = 1;
for (int i = 0; i < option->channel_axis; ++i) {
N *= bottom->shape[n][i];
}
for (int i = option->channel_axis + 1; i < bottom->ndim; ++i) {
D *= bottom->shape[n][i];
}
ND_max = MAX(ND_max, N * D);
}
*p_temp_space = ND_max * sizeof(real);
}
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_softmax_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
softmax_forward(get_bottom(layer, 0), get_top(layer, 0),
net->temp_data, &layer->option);
}
void shape_softmax_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
long int temp_space;
softmax_shape(get_bottom(layer, 0), get_top(layer, 0),
&temp_space, &layer->option);
update_temp_space(net, temp_space);
}
void init_softmax_layer(void* const net_, void* const layer_)
{
return;
}
void free_softmax_layer(void* const net_, void* const layer_)
{
return;
}
|
118a160f2b6dda7eb75fb85c5a43c84e255e079b.cu
|
#include "layers/operator.h"
#include <float.h>
#include <string.h>
// channel-wise softmax transform: bottom3d (N x C x D) -> top3d (N x C x D)
// top[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// 1. to avoid exp(...) overflowing to infinity, subtract the max
// exp(x_c) / sum_c(exp(x_c))
// = ( exp(x_c) / exp(x_max) ) / sum_c( exp(x_c) / exp(x_max) )
// = exp(x_c - x_max) / sum_c(exp(x_c - x_max))
// 2. thus, the transform consists of 5 steps:
// a. max[n][d] = max_c(bottom[n][c][d])
// b. sub[n][c][d] = bottom[n][c][d] - max[n][d]
// c. exp[n][c][d] = exp(sub[n][c][d])
// d. sum[n][d] = sum_c(exp[n][c][d])
// e. top[n][c][d] = exp[n][c][d] / sum[n][d]
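//    e.g. for C = 2 and x = (1, 3): max = 3, sub = (-2, 0),
//    exp = (0.135, 1), sum = 1.135, top = (0.119, 0.881)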
// --------------------------------------------------------------------------
// kernel code
// channel_max_{gpu, cpu}
// subtract_max_{gpu, cpu}
// exp_{gpu, cpu}
// channel_sum_{gpu, cpu}
// div_sum_{gpu, cpu}
// softmax_inplace
// --------------------------------------------------------------------------
// compute max2d[n][d] = max_c(data3d[n][c][d])
#ifdef GPU
__global__
static
void channel_max_gpu(const real data3d[], real max2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int max_size = N * D;
if (index < max_size) {
const int n = index / D;
const int d = index % D;
real maxval = -FLT_MAX;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
maxval = MAX(maxval, data3d[data_index]);
}
max2d[index] = maxval;
}
}
#else
static
void channel_max_cpu(const real data3d[], real max2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int d = 0; d < D; ++d) {
const long int max_index = n * D + d;
real maxval = -FLT_MAX;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
maxval = MAX(maxval, data3d[data_index]);
}
max2d[max_index] = maxval;
}
}
}
#endif
// in-place subtraction: data3d[n][c][d] = data3d[n][c][d] - max2d[n][d]
#ifdef GPU
__global__
static
void subtract_max_gpu(real data3d[], const real max2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int data_size = N * C * D;
if (index < data_size) {
const int n = index / C / D;
const int d = index % D;
const long int max_index = n * D + d;
data3d[index] -= max2d[max_index];
}
}
#else
static
void subtract_max_cpu(real data3d[], const real max2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
for (int d = 0; d < D; ++d) {
const long int data_index = (n * C + c) * D + d;
const long int max_index = n * D + d;
data3d[data_index] -= max2d[max_index];
} // endfor d
} // endfor c
} // endfor n
}
#endif
// in-place element-wise exp: data3d[n][c][d] = exp(data[n][c][d])
#ifdef GPU
__global__
static
void exp_gpu(real data[], const long int data_size)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < data_size) {
data[index] = exp(data[index]);
}
}
#else
static
void exp_cpu(real data[], const long int data_size)
{
for (long int index = 0; index < data_size; ++index) {
data[index] = exp(data[index]);
}
}
#endif
// compute sum2d[n][d] = sum_c(data3d[n][c][d])
#ifdef GPU
__global__
static
void channel_sum_gpu(const real data3d[], real sum2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
const long int sum_size = N * D;
if (index < sum_size) {
const int n = index / D;
const int d = index % D;
real sumval = 0;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
sumval += data3d[data_index];
}
sum2d[index] = sumval;
}
}
#else
static
void channel_sum_cpu(const real data3d[], real sum2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int d = 0; d < D; ++d) {
const long int sum_index = n * D + d;
real sumval = 0;
for (int c = 0; c < C; ++c) {
const long int data_index = (n * C + c) * D + d;
sumval += data3d[data_index];
}
sum2d[sum_index] = sumval;
}
}
}
#endif
// in-place division: data3d[n][c][d] = data3d[n][c][d] / sum2d[n][d]
#ifdef GPU
__global__
static
void div_sum_gpu(real data3d[], const real sum2d[],
const int N, const int C, const int D)
{
const long int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < N * C * D) {
const int n = index / C / D;
const int d = index % D;
const long int sum_index = n * D + d;
data3d[index] /= sum2d[sum_index];
}
}
#else
static
void div_sum_cpu(real data3d[], const real sum2d[],
const int N, const int C, const int D)
{
for (int n = 0; n < N; ++n) {
for (int c = 0; c < C; ++c) {
for (int d = 0; d < D; ++d) {
const long int data_index = (n * C + c) * D + d;
const long int sum_index = n * D + d;
data3d[data_index] /= sum2d[sum_index];
} // endfor d
} // endfor c
} // endfor n
}
#endif
// channel-wise in-place softmax transform
// bottom[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// bottom3d: N x C x D array
// temp_data: N * D array, temporary space for channel-wise sum or max
// e.g., temp_data[n][d] = sum_c(exp(bottom3d[n][c][d]))
static
void softmax_inplace(real bottom3d[], real temp_data[],
const int N, const int C, const int D)
{
// 1. max[n][d] = max_c(bottom[n][c][d])
{
#ifdef GPU
const long int num_threads = N * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
channel_max_gpu<<<num_blocks, threads_per_block>>>(
bottom3d, temp_data, N, C, D);
#else
channel_max_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 2. sub[n][c][d] = bottom[n][c][d] - max[n][d]
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
subtract_max_gpu<<<num_blocks, threads_per_block>>>(
bottom3d, temp_data, N, C, D);
#else
subtract_max_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 3. exp[n][c][d] = exp(sub[n][c][d])
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
exp_gpu<<<num_blocks, threads_per_block>>>(
bottom3d, num_threads);
#else
const long int data_size = N * C * D;
exp_cpu(bottom3d, data_size);
#endif
}
// 4. sum[n][d] = sum_c(exp[n][c][d])
{
#ifdef GPU
const long int num_threads = N * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
channel_sum_gpu<<<num_blocks, threads_per_block>>>(
bottom3d, temp_data, N, C, D);
#else
channel_sum_cpu(bottom3d, temp_data, N, C, D);
#endif
}
// 5. top[n][c][d] = exp[n][c][d] / sum[n][d]
{
#ifdef GPU
const long int num_threads = N * C * D;
const int threads_per_block = 512;
const int num_blocks = DIV_THEN_CEIL(num_threads, threads_per_block);
div_sum_gpu<<<num_blocks, threads_per_block>>>(
bottom3d, temp_data, N, C, D);
#else
div_sum_cpu(bottom3d, temp_data, N, C, D);
#endif
}
}
// --------------------------------------------------------------------------
// layer-wise operator code
// --------------------------------------------------------------------------
// channel-wise softmax transform: bottom3d (N x C x D) -> top3d (N x C x D)
// top[n][c][d] = exp(bottom[n][c][d]) / sum_c(exp(bottom[n][c][d]))
// option->channel_axis: axis index to be considered as "channel"
// e.g., option->channel_axis = 0 if bottom = C x H x W tensor
// N = product of shape[0, ..., option->channel_axis-1]
// C = shape[option->channel_axis]
// D = product of shape[option->channel_axis+1, ..., ndim-1]
// temp_data: N * D array, temporary space for channel-wise sum or max
// e.g., temp_data[n][d] = sum_c(exp(bottom[n][c][d]))
static
void softmax_forward(const Tensor* const bottom,
Tensor* const top,
real temp_data[],
const LayerOption* const option)
{
  // copy bottom -> top, and then perform the in-place operation
if (bottom->data != top->data) {
const long int data_size = get_data_size(bottom);
#ifdef GPU
cudaMemcpyAsync(top->data, bottom->data, data_size * sizeof(real),
cudaMemcpyDeviceToDevice);
#else
memcpy(top->data, bottom->data, data_size * sizeof(real));
#endif
}
// perform in-place softmax operation
for (int n = 0; n < bottom->num_items; ++n) {
real* const p_top_item = top->data + bottom->start[n];
const int C = bottom->shape[n][option->channel_axis];
int N = 1, D = 1;
for (int i = 0; i < option->channel_axis; ++i) {
N *= bottom->shape[n][i];
}
for (int i = option->channel_axis + 1; i < bottom->ndim; ++i) {
D *= bottom->shape[n][i];
}
softmax_inplace(p_top_item, temp_data, N, C, D);
}
}
// --------------------------------------------------------------------------
// output shape calculator code
// --------------------------------------------------------------------------
static
void softmax_shape(const Tensor* const bottom,
Tensor* const top,
long int* const p_temp_space,
const LayerOption* const option)
{
// top shape = bottom shape
if (bottom != top) {
top->ndim = bottom->ndim;
top->num_items = bottom->num_items;
for (int n = 0; n < bottom->num_items; ++n) {
for (int i = 0; i < bottom->ndim; ++i) {
top->shape[n][i] = bottom->shape[n][i];
}
}
for (int n = 0; n < bottom->num_items; ++n) {
top->start[n] = bottom->start[n];
}
}
// temporary space for channel-wise sum or max: N * D
{
int ND_max = 0;
for (int n = 0; n < bottom->num_items; ++n) {
int N = 1, D = 1;
for (int i = 0; i < option->channel_axis; ++i) {
N *= bottom->shape[n][i];
}
for (int i = option->channel_axis + 1; i < bottom->ndim; ++i) {
D *= bottom->shape[n][i];
}
ND_max = MAX(ND_max, N * D);
}
*p_temp_space = ND_max * sizeof(real);
}
}
// --------------------------------------------------------------------------
// functions for layer instance
// --------------------------------------------------------------------------
void forward_softmax_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
softmax_forward(get_bottom(layer, 0), get_top(layer, 0),
net->temp_data, &layer->option);
}
void shape_softmax_layer(void* const net_, void* const layer_)
{
Net* const net = (Net*)net_;
Layer* const layer = (Layer*)layer_;
long int temp_space;
softmax_shape(get_bottom(layer, 0), get_top(layer, 0),
&temp_space, &layer->option);
update_temp_space(net, temp_space);
}
void init_softmax_layer(void* const net_, void* const layer_)
{
return;
}
void free_softmax_layer(void* const net_, void* const layer_)
{
return;
}
|
7adec3676564e30e110db32f944c07b40eedbaf3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = blockIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
hipMalloc( &d_a, SIZE*sizeof(float));
hipMalloc( &d_b, SIZE*sizeof(float));
hipMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
hipMemcpy( d_a, a, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_b, b, SIZE*sizeof(float), hipMemcpyHostToDevice );
hipMemcpy( d_c, c, SIZE*sizeof(float), hipMemcpyHostToDevice );
start = clock();
hipLaunchKernelGGL(( VectorAdd), dim3(SIZE), dim3(1), 0, 0, d_a, d_b, d_c, SIZE);
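    // Note: the launch is asynchronous, so end - start measures mostly the
    // launch overhead; a hipDeviceSynchronize() here would be needed to
    // time the kernel itself (the hipMemcpy below synchronizes anyway).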
end = clock();
hipMemcpy( c, d_c, SIZE*sizeof(float), hipMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
hipFree(d_a);
hipFree(d_b);
hipFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
}
|
7adec3676564e30e110db32f944c07b40eedbaf3.cu
|
#include <stdio.h>
#include <time.h>
#define SIZE 1024
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
int i = blockIdx.x;
if (i < n)
c[i] = a[i] + b[i];
}
int main()
{
float *a, *b, *c;
float *d_a, *d_b, *d_c;
clock_t start, end;
double cpu_time_used;
a = (float *)malloc(SIZE*sizeof(float));
b = (float *)malloc(SIZE*sizeof(float));
c = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(float));
cudaMalloc( &d_b, SIZE*sizeof(float));
cudaMalloc( &d_c, SIZE*sizeof(float));
for( int i = 0; i < SIZE; ++i )
{
a[i] = (float) i;
b[i] = (float) i;
c[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
start = clock();
VectorAdd<<<SIZE, 1>>>(d_a, d_b, d_c, SIZE);
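    // Note: the launch is asynchronous, so end - start measures mostly the
    // launch overhead; a cudaDeviceSynchronize() here would be needed to
    // time the kernel itself (the cudaMemcpy below synchronizes anyway).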
end = clock();
cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
for( int i = 0; i < 10; ++i)
printf("c[%d] = %f\n", i, c[i]);
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
printf("Time = %f seconds to execute.\n", cpu_time_used);
return 0;
}
|
c44e159505ea87f403bc43a372c2199c7e4ff07a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_r1 [8][2];
static int dims_update_halo_kernel1_r1_h [8][2] = {0};
//user function
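// For each field enabled in 'fields', copy the value one cell to the left
// (offset -1,0,0) into the current cell, i.e. fill this halo layer from its
// interior neighbour (the "r1" variant presumably handles the depth-1
// right-hand boundary).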
__device__
inline void update_halo_kernel1_r1_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(-1,0,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(-1,0,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(-1,0,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(-1,0,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(-1,0,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(-1,0,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(-1,0,0);
}
__global__ void ops_update_halo_kernel1_r1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[0][0] * dims_update_halo_kernel1_r1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[1][0] * dims_update_halo_kernel1_r1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[2][0] * dims_update_halo_kernel1_r1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[3][0] * dims_update_halo_kernel1_r1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[4][0] * dims_update_halo_kernel1_r1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[5][0] * dims_update_halo_kernel1_r1[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[6][0] * dims_update_halo_kernel1_r1[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_r1[0][0], dims_update_halo_kernel1_r1[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_r1[1][0], dims_update_halo_kernel1_r1[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_r1[2][0], dims_update_halo_kernel1_r1[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_r1[3][0], dims_update_halo_kernel1_r1[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_r1[4][0], dims_update_halo_kernel1_r1[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_r1[5][0], dims_update_halo_kernel1_r1[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_r1[6][0], dims_update_halo_kernel1_r1[6][1], arg6);
update_halo_kernel1_r1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,19)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(19,"update_halo_kernel1_r1");
OPS_kernels[19].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_r1_h[0][0] || ydim0 != dims_update_halo_kernel1_r1_h[0][1] || xdim1 != dims_update_halo_kernel1_r1_h[1][0] || ydim1 != dims_update_halo_kernel1_r1_h[1][1] || xdim2 != dims_update_halo_kernel1_r1_h[2][0] || ydim2 != dims_update_halo_kernel1_r1_h[2][1] || xdim3 != dims_update_halo_kernel1_r1_h[3][0] || ydim3 != dims_update_halo_kernel1_r1_h[3][1] || xdim4 != dims_update_halo_kernel1_r1_h[4][0] || ydim4 != dims_update_halo_kernel1_r1_h[4][1] || xdim5 != dims_update_halo_kernel1_r1_h[5][0] || ydim5 != dims_update_halo_kernel1_r1_h[5][1] || xdim6 != dims_update_halo_kernel1_r1_h[6][0] || ydim6 != dims_update_halo_kernel1_r1_h[6][1]) {
dims_update_halo_kernel1_r1_h[0][0] = xdim0;
dims_update_halo_kernel1_r1_h[0][1] = ydim0;
dims_update_halo_kernel1_r1_h[1][0] = xdim1;
dims_update_halo_kernel1_r1_h[1][1] = ydim1;
dims_update_halo_kernel1_r1_h[2][0] = xdim2;
dims_update_halo_kernel1_r1_h[2][1] = ydim2;
dims_update_halo_kernel1_r1_h[3][0] = xdim3;
dims_update_halo_kernel1_r1_h[3][1] = ydim3;
dims_update_halo_kernel1_r1_h[4][0] = xdim4;
dims_update_halo_kernel1_r1_h[4][1] = ydim4;
dims_update_halo_kernel1_r1_h[5][0] = xdim5;
dims_update_halo_kernel1_r1_h[5][1] = ydim5;
dims_update_halo_kernel1_r1_h[6][0] = xdim6;
dims_update_halo_kernel1_r1_h[6][1] = ydim6;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel1_r1, dims_update_halo_kernel1_r1_h, sizeof(dims_update_halo_kernel1_r1)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[19].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel1_r1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[19].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[19].mpi_time += t2-t1;
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 19;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 19;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(19,"update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
|
c44e159505ea87f403bc43a372c2199c7e4ff07a.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel1_r1 [8][2];
static int dims_update_halo_kernel1_r1_h [8][2] = {0};
//user function
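// For each field enabled in 'fields', copy the value one cell to the left
// (offset -1,0,0) into the current cell, i.e. fill this halo layer from its
// interior neighbour (the "r1" variant presumably handles the depth-1
// right-hand boundary).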
__device__
inline void update_halo_kernel1_r1_gpu(ACC<double> &density0,
ACC<double> &density1,
ACC<double> &energy0,
ACC<double> &energy1,
ACC<double> &pressure,
ACC<double> &viscosity,
ACC<double> &soundspeed,
const int* fields) {
if(fields[FIELD_DENSITY0] == 1) density0(0,0,0) = density0(-1,0,0);
if(fields[FIELD_DENSITY1] == 1) density1(0,0,0) = density1(-1,0,0);
if(fields[FIELD_ENERGY0] == 1) energy0(0,0,0) = energy0(-1,0,0);
if(fields[FIELD_ENERGY1] == 1) energy1(0,0,0) = energy1(-1,0,0);
if(fields[FIELD_PRESSURE] == 1) pressure(0,0,0) = pressure(-1,0,0);
if(fields[FIELD_VISCOSITY] == 1) viscosity(0,0,0) = viscosity(-1,0,0);
if(fields[FIELD_SOUNDSPEED] == 1) soundspeed(0,0,0) = soundspeed(-1,0,0);
}
__global__ void ops_update_halo_kernel1_r1(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
const int* __restrict arg7,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[0][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[0][0] * dims_update_halo_kernel1_r1[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[1][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[1][0] * dims_update_halo_kernel1_r1[1][1];
arg2 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[2][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[2][0] * dims_update_halo_kernel1_r1[2][1];
arg3 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[3][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[3][0] * dims_update_halo_kernel1_r1[3][1];
arg4 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[4][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[4][0] * dims_update_halo_kernel1_r1[4][1];
arg5 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[5][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[5][0] * dims_update_halo_kernel1_r1[5][1];
arg6 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel1_r1[6][0] + idx_z * 1*1 * dims_update_halo_kernel1_r1[6][0] * dims_update_halo_kernel1_r1[6][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel1_r1[0][0], dims_update_halo_kernel1_r1[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel1_r1[1][0], dims_update_halo_kernel1_r1[1][1], arg1);
ACC<double> argp2(dims_update_halo_kernel1_r1[2][0], dims_update_halo_kernel1_r1[2][1], arg2);
ACC<double> argp3(dims_update_halo_kernel1_r1[3][0], dims_update_halo_kernel1_r1[3][1], arg3);
ACC<double> argp4(dims_update_halo_kernel1_r1[4][0], dims_update_halo_kernel1_r1[4][1], arg4);
ACC<double> argp5(dims_update_halo_kernel1_r1[5][0], dims_update_halo_kernel1_r1[5][1], arg5);
ACC<double> argp6(dims_update_halo_kernel1_r1[6][0], dims_update_halo_kernel1_r1[6][1], arg6);
update_halo_kernel1_r1_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6, arg7);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
#else
void ops_par_loop_update_halo_kernel1_r1_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
ops_arg arg7 = desc->args[7];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[8] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,8,range,19)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(19,"update_halo_kernel1_r1");
OPS_kernels[19].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 8,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
int xdim3 = args[3].dat->size[0];
int ydim3 = args[3].dat->size[1];
int xdim4 = args[4].dat->size[0];
int ydim4 = args[4].dat->size[1];
int xdim5 = args[5].dat->size[0];
int ydim5 = args[5].dat->size[1];
int xdim6 = args[6].dat->size[0];
int ydim6 = args[6].dat->size[1];
if (xdim0 != dims_update_halo_kernel1_r1_h[0][0] || ydim0 != dims_update_halo_kernel1_r1_h[0][1] || xdim1 != dims_update_halo_kernel1_r1_h[1][0] || ydim1 != dims_update_halo_kernel1_r1_h[1][1] || xdim2 != dims_update_halo_kernel1_r1_h[2][0] || ydim2 != dims_update_halo_kernel1_r1_h[2][1] || xdim3 != dims_update_halo_kernel1_r1_h[3][0] || ydim3 != dims_update_halo_kernel1_r1_h[3][1] || xdim4 != dims_update_halo_kernel1_r1_h[4][0] || ydim4 != dims_update_halo_kernel1_r1_h[4][1] || xdim5 != dims_update_halo_kernel1_r1_h[5][0] || ydim5 != dims_update_halo_kernel1_r1_h[5][1] || xdim6 != dims_update_halo_kernel1_r1_h[6][0] || ydim6 != dims_update_halo_kernel1_r1_h[6][1]) {
dims_update_halo_kernel1_r1_h[0][0] = xdim0;
dims_update_halo_kernel1_r1_h[0][1] = ydim0;
dims_update_halo_kernel1_r1_h[1][0] = xdim1;
dims_update_halo_kernel1_r1_h[1][1] = ydim1;
dims_update_halo_kernel1_r1_h[2][0] = xdim2;
dims_update_halo_kernel1_r1_h[2][1] = ydim2;
dims_update_halo_kernel1_r1_h[3][0] = xdim3;
dims_update_halo_kernel1_r1_h[3][1] = ydim3;
dims_update_halo_kernel1_r1_h[4][0] = xdim4;
dims_update_halo_kernel1_r1_h[4][1] = ydim4;
dims_update_halo_kernel1_r1_h[5][0] = xdim5;
dims_update_halo_kernel1_r1_h[5][1] = ydim5;
dims_update_halo_kernel1_r1_h[6][0] = xdim6;
dims_update_halo_kernel1_r1_h[6][1] = ydim6;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel1_r1, dims_update_halo_kernel1_r1_h, sizeof(dims_update_halo_kernel1_r1)));
}
int *arg7h = (int *)arg7.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
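  //stage the global constant argument (arg7: NUM_FIELDS ints) in the OPS consts buffers and copy it to the device before launch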
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg7.data = OPS_consts_h + consts_bytes;
arg7.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg7.data)[d] = arg7h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[8];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
(start[1] * args[2].stencil->stride[1]);
base2 = base2+ dat2 *
args[2].dat->size[0] *
args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
(start[1] * args[3].stencil->stride[1]);
base3 = base3+ dat3 *
args[3].dat->size[0] *
args[3].dat->size[1] *
(start[2] * args[3].stencil->stride[2]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
(start[1] * args[4].stencil->stride[1]);
base4 = base4+ dat4 *
args[4].dat->size[0] *
args[4].dat->size[1] *
(start[2] * args[4].stencil->stride[2]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
(start[1] * args[5].stencil->stride[1]);
base5 = base5+ dat5 *
args[5].dat->size[0] *
args[5].dat->size[1] *
(start[2] * args[5].stencil->stride[2]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
(start[1] * args[6].stencil->stride[1]);
base6 = base6+ dat6 *
args[6].dat->size[0] *
args[6].dat->size[1] *
(start[2] * args[6].stencil->stride[2]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 8);
ops_halo_exchanges(args,8,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[19].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel1_r1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6], (int *)arg7.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[19].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 8);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
ops_set_halo_dirtybit3(&args[2],range);
ops_set_halo_dirtybit3(&args[3],range);
ops_set_halo_dirtybit3(&args[4],range);
ops_set_halo_dirtybit3(&args[5],range);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[19].mpi_time += t2-t1;
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[19].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
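//Lazy-execution wrapper: records the loop arguments in a kernel descriptor and defers execution to ops_enqueue_kernel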
void ops_par_loop_update_halo_kernel1_r1(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6, ops_arg arg7) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 19;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 19;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 8;
desc->args = (ops_arg*)malloc(8*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->args[7] = arg7;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg7.data,NUM_FIELDS*sizeof(int));
desc->args[7].data = tmp;
desc->function = ops_par_loop_update_halo_kernel1_r1_execute;
if (OPS_diags > 1) {
ops_timing_realloc(19,"update_halo_kernel1_r1");
}
ops_enqueue_kernel(desc);
}
#endif
// file: b2997e06f91beaa5b95b836dcd380e7549ab32eb.hip
// !!! This is a file automatically generated by hipify!!!
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <hip/hip_runtime.h>
#include <system/buffer.h>
#include <loops/transform_any.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <helpers/threshold.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <execution/AffinityManager.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <helpers/CudaLaunchHelper.h>
#include <graph/GraphExecutioner.h>
#include <helpers/BlasHelper.h>
#include <graph/GraphHolder.h>
#include <ops/declarable/CustomOperations.h>
#include <helpers/PointersManager.h>
//#include <sys/time.h>
#include <hiprand/hiprand.h>
#include <graph/Status.h>
using namespace sd;
#include <loops/special_kernels.h>
#include <performance/benchmarking/FullBenchmarkSuit.h>
#include <performance/benchmarking/LightBenchmarkSuit.h>
hipDeviceProp_t *deviceProperties;
hipFuncAttributes *funcAttributes = new hipFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
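// 48KB of __constant__ scratch space; written via memcpyConstantAsync() and exposed through getConstantSpace() below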
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
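// (used below e.g. by average()/accumulate() to decode the id packed into the extras pointer array)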
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
/*
 * This method returns the shared memory threshold value. The default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(hipStream_t stream) {
auto scalarShapeInfo = shape::createScalarShapeInfo();
auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
sd::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
sd::buffer::Buffer<Nd4jLong> *scalarDimension;
sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(hipStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
sd::buffer::freeBuffer(&scalarShapeInfo);
sd::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
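// Holds a device-resident scalar value plus its shape descriptor; used to fetch scalar reduction results back to the host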
template <typename T>
class ScalarInfo {
sd::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
hipStream_t streamRef;
public:
ScalarInfo(hipStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = sd::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
sd::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
sd::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the infinite dimension device pointer
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
sd::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
void execPairwiseTransform( Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execPairwiseTransformBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execBroadcastBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcastBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcast(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloatScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSameScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceLong(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
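        // launchDims packs (grid size, block size, shared memory bytes) for the reduction kernel launch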
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), hXShapeInfo,
extraParams,
                                                 dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduce(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduce(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
(int *) dbDimension->special(), dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduceScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto streamSpecial = reinterpret_cast<hipStream_t &>(extraPointers[4]);
LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
reinterpret_cast<int *>(extraPointers[6]));
NativeOpExecutioner::execTransformAny(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
nullptr, nullptr);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformStrict(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void checkP2P() {
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
hipSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
hipGetDevice(&curDevice);
int devCnt = 0;
hipGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice >= devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
hipSetDevice(dX);
hipDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
hipDeviceEnablePeerAccess(dY, 0);
} else {
hipDeviceDisablePeerAccess(dY);
}
} else {
if (sd::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
hipSetDevice(curDevice);
}
allowedP2P = enable;
hipSetDevice(curDevice);
}
bool isP2PAvailable() {
return supportedP2P;
}
void initializeDevicesAndFunctions() {
try {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
deviceProperties = new hipDeviceProp_t[devCnt];
for (int i = 0; i < devCnt; i++) {
hipSetDevice(i);
hipGetDeviceProperties(&deviceProperties[i], i);
hipDeviceSetLimit(hipLimitStackSize, 4096);
}
hipSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void initializeFunctions(Nd4jPointer *functions) {
sd::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
hipblasSgemv = (CublasSgemv)functions[0];
hipblasDgemv = (CublasDgemv)functions[1];
hipblasHgemm = (CublasHgemm)functions[2];
hipblasSgemm = (CublasSgemm)functions[3];
hipblasDgemm = (CublasDgemm)functions[4];
cublasSgemmEx = (CublasSgemmEx)functions[5];
hipblasHgemmBatched = (CublasHgemmBatched)functions[6];
hipblasSgemmBatched = (CublasSgemmBatched)functions[7];
hipblasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
* This method acquires memory chunk of requested size on host side
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
* @param flags optional parameter
*/
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// hipHostMallocMapped |hipHostMallocPortable
auto res = hipHostMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8, hipHostMallocDefault);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method acquires memory chunk of requested size on specified device
*
* @param pointer pointer that'll be used for allocation
* @param memorySize memory size, in bytes
 * @param ptrToDeviceId pointer to deviceId. For cuda that's just an int, for OpenCL that's a pointer to device_id, etc
* @param flags optional parameter
*/
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = hipMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int freeHost(Nd4jPointer pointer) {
auto res = hipHostFree(reinterpret_cast<void *>(pointer));
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipHostFree failed");
}
return 1L;
}
/**
* This method releases previously allocated memory space on device
*
* @param pointer pointer that'll be freed
* @param ptrToDeviceId pointer to deviceId.
*/
int freeDevice(Nd4jPointer pointer, int deviceId) {
auto res = hipFree(reinterpret_cast<void *>(pointer));
    // we're intentionally skipping error code 1 here
if (res != 0 && res != 1) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipFree failed");
}
return res == 0 ? 1L : 0L;
}
Nd4jPointer createContext() {
return 0L;
}
Nd4jPointer createStream() {
auto stream = new hipStream_t();
auto dZ = hipStreamCreate(stream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamCreate failed");
}
return stream;
}
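// Illustrative host-side usage of the stream helpers in this file (a minimal sketch, not a prescribed calling sequence):
//   Nd4jPointer stream = createStream();
//   memcpyAsync(dst, src, bytes, 1 /* host -> device */, stream);
//   streamSynchronize(stream);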
Nd4jPointer createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(hipEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(hipEvent_t));
auto dZ = hipEventCreateWithFlags(reinterpret_cast<hipEvent_t *>(&nativeEvent), hipEventDisableTiming);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventCreateWithFlags failed");
}
return nativeEvent;
}
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto pStream = reinterpret_cast<hipStream_t *>(stream);
auto dZ = hipEventRecord(*pEvent, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventRecord failed");
}
return 1;
}
int setDevice(int deviceId) {
AffinityManager::setCurrentDevice(deviceId);
return 1;
}
Nd4jLong getDeviceFreeMemoryDefault() {
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceFreeMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceTotalMemory(int device) {
int orig = -1;
hipGetDevice(&orig);
if (device >= 0 && device != orig) {
hipSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
hipMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
hipSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipMemcpyKind kind;
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
return 0;
}
}
auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpy failed");
return 0;
}
return 1;
}
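// flags encoding shared by memcpySync/memcpyAsync/memcpyConstantAsync: 0 = host->host, 1 = host->device, 2 = device->host, 3 = device->device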
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<hipStream_t *>(reserved);
hipMemcpyKind kind;
//sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
            sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
return 0;
}
}
auto dZ = hipMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
//auto dZ = hipMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyAsync failed");
return 0;
}
return 1;
}
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto dZ = hipMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemset failed");
}
return 1;
}
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<hipStream_t *>(reserved);
auto dZ = hipMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemsetAsync failed");
}
return 1;
}
int destroyEvent(Nd4jPointer event) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto dZ = hipEventDestroy(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventDestroy failed");
}
return 1;
}
int streamSynchronize(Nd4jPointer stream) {
auto pStream = reinterpret_cast<hipStream_t *>(stream);
auto dZ = hipStreamSynchronize(*pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipStreamSynchronize failed");
}
return 1L;
}
int eventSynchronize(Nd4jPointer event) {
auto pEvent = reinterpret_cast<hipEvent_t *>(&event);
auto dZ = hipEventSynchronize(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipEventSynchronize failed");
}
return 1L;
}
int getAvailableDevices() {
int devCnt = 0;
hipGetDeviceCount(&devCnt);
return devCnt;
}
void enableDebugMode(bool reallyEnable) {
sd::Environment::getInstance()->setDebug(reallyEnable);
}
void setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int ompGetMaxThreads() {
return maxThreads;
}
int ompGetNumThreads() {
return maxThreads;
}
void setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void enableVerboseMode(bool reallyEnable) {
sd::Environment::getInstance()->setVerbose(reallyEnable);
}
int getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * getDeviceName(int device) {
return deviceProperties[device].name;
}
void specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
try {
BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
LIBND4J_TYPES);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
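// note: specialConcat dispatches to the host-side concatCpuGeneric implementation rather than a device kernel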
/**
 * This method builds a TAD (tensor-along-dimension) pack for the given dimensions and returns it
 */
sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) {
try {
auto pack = new TadPack();
*pack = sd::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength);
return pack;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) {
return pack->primaryShapeInfo();
}
Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) {
return pack->primaryOffsets();
}
Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) {
return pack->specialShapeInfo();
}
Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) {
return pack->specialOffsets();
}
Nd4jLong getNumberOfTads(sd::TadPack* pack) {
return pack->numberOfTads();
}
int getShapeInfoLength(sd::TadPack* pack) {
return pack->shapeInfoLength();
}
int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
hipStream_t *pStream = reinterpret_cast<hipStream_t *>(reserved);
hipMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = hipMemcpyHostToHost;
}
break;
case 1: {
kind = hipMemcpyHostToDevice;
}
break;
case 2: {
kind = hipMemcpyDeviceToHost;
}
break;
case 3: {
kind = hipMemcpyDeviceToDevice;
}
break;
}
auto dZ = hipMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipMemcpyToSymbolAsync failed");
}
return 1;
}
Nd4jPointer getConstantSpace() {
Nd4jPointer dConstAddr;
hipError_t dZ = hipGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("hipGetSymbolAddress failed");
}
return dConstAddr;
}
void pullRows(Nd4jPointer *extraPointers,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
Nd4jLong const* zTadShapeInfo,
Nd4jLong const* zTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
(launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets),
LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
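// shuffle rearranges the contents of N device arrays according to `shuffleMap` (reading from dx
// and writing into dz), using per-array TAD shape info and offsets, on the stream passed via extras[1].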
void shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = sd::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
(launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
bool isExperimentalEnabled() {
return sd::Environment::getInstance()->isExperimentalBuild();
}
void setOmpMinThreads(int threads) {
minThreads = sd::math::nd4j_max<int>(32, threads);
minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads);
}
int getDevice() {
return sd::AffinityManager::currentDeviceId();
}
void setElementThreshold(int num) {
// this is no-op for CUDA
}
void setTADThreshold(int num) {
// this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
void execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
bool biasCorrected,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
tadShapeInfo, tadOffsets,
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Tad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
auto tadLength = shape::length(tadPack.primaryShapeInfo());
auto yLength = shape::length(hYShapeInfo);
auto xLength = shape::length(hXShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
if (tadLength == yLength || tadLength == xLength) {
// nd4j_printf("== way\n","");
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
} else
NativeOpExecutioner::execReduce3TAD(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3Scalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBoolTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
throw sd::datatype_exception::build("execScalarTad: both operands must have the same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
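// The aggregate entry points below (execAggregate, batchExecutor, execAggregateBatch) are kept
// as empty no-op stubs on this backend.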
void execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
sd::DataType dtype) {
}
void batchExecutor(Nd4jPointer *extraPointers,
int numAggregates,
int opNum,
int maxArgs,
int maxShapes,
int maxIntArrays,
int maxIntArraySize,
int maxIdx,
int maxReals,
void *ptrToArguments,
sd::DataType dtype) {
}
void execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, sd::DataType dtype) {
}
////////////////////////////////////////////////////////////////////////
void execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
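// initRandom wraps the provided host and device buffers in a RandomBuffer, generates the initial
// random sequence on the host with Xoroshiro128, and then copies it to the device asynchronously
// on the stream passed via extraPointers[1]; the returned pointer is the RandomBuffer itself.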
Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// hipStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
hipMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, hipMemcpyHostToDevice, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void destroyRandom(Nd4jPointer ptrBuffer) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer);
// FIXME: this is not ideal, but we can't know in advance which stream(s) were using this generator in practice
hipDeviceSynchronize();
delete buffer;
}
void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
hipStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
// refresh the buffer on the host side
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
hipMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, hipMemcpyHostToDevice, *stream);
}
void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
hipStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
/**
* Return the length of a shape buffer
* based on the pointer
* @param buffer the buffer pointer to check
* @return the shape info length for the buffer's rank
*/
int lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
/**
* Get the pointer corresponding to the given address
*
* @param address the address to get the pointer
* @return the pointer for the given address
*/
Nd4jPointer pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void tear(Nd4jPointer *extras,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({}, {dbX});
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric,
(launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
InteropDataBuffer::registerSpecialUse({}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
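// prescanArrayRecursive implements a work-efficient parallel prefix scan: the input is split into
// blocks of numEltsPerBlock = 2*numThreads elements, each block is scanned in shared memory (padded
// to avoid bank conflicts), per-block totals are collected into g_scanBlockSums[level] and scanned
// recursively, and uniformAdd then adds the scanned block totals back to produce the final result.
// A non-full last block (non-power-of-2 input) is handled by a separate launch.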
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<hipStream_t *>(extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (sd::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = sd::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
// This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
hipLaunchKernelGGL(( sd::uniformAdd), dim3(grid), dim3(threads), 1024, *stream, dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
hipLaunchKernelGGL(( sd::uniformAdd), dim3(1), dim3(numThreadsLastBlock), 1024, *stream, dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
////////////////////////////////////////////////////////////////////////
void execReduce3All(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParamsVals,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets,
Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3All(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParamsVals,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
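// sort runs entirely on the device: when the length is a power of two (up to roughly 10M elements)
// it uses the full bitonic sorting network (bitonicSortStepGeneric); otherwise it falls back to the
// arbitrary-step bitonic variant (bitonicArbitraryStepGeneric) with windowed passes.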
void sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
bool descending) {
try {
hipStream_t *stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
(launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
(launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByKey: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
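// sortByValue reuses the keyed bitonic kernels with the key and value buffers swapped, so the
// values are sorted as keys and the keys follow them; the x/y data types are read from the
// opposite shape infos to match that swapped argument order.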
void sortByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByValue: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
bool descending) {
try {
// to be implemented
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 512, 32768);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric,
(launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
try {
return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) {
return ptr->size();
}
Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) {
return ptr->pointer();
}
const char* getAllCustomOps() {
return sd::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
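// This shape-calculation helper builds a graph Context populated with the provided i/t/b/d
// arguments and NDArray views over the input buffers (empty arrays get null buffers), then
// delegates to op->calculateOutputShape().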
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
sd::graph::VariableSpace varSpace;
Context block(2, &varSpace);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numDArgs; e++)
block.getDArguments()->push_back((sd::DataType) dArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't pass a buffer if this is an empty array
void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];
auto array = new sd::NDArray(buffer_, bufferD_, shape_);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.launchContext()->getWorkspace() != nullptr)
shapeList->detach();
return shapeList;
}
sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getShapeListSize(sd::ShapeList* list) {
return list->size();
}
Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) {
return list->at(i);
}
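// realExec is the shared helper behind execCustomOp: it wraps the raw input/output buffers into
// NDArrays, copies the t/i/b arguments, executes the DeclarableOp, and for non-inplace execution
// streamlines each output to the ordering requested by its shape info before cleaning up.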
static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<sd::NDArray*> inputs(numInputs);
std::vector<sd::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];
inputs[e] = new sd::NDArray(buffer, bufferD, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];
// FIXME: revisit this.
bool canNullify = true;
for (int i = 0; i < numInputs; i++) {
void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify && buffer != nullptr)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new sd::NDArray(buffer, bufferD, shape);
outputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
auto context = reinterpret_cast<Context *>(opContext);
auto result = op->execute(context);
auto res = hipStreamSynchronize(*context->launchContext()->getCudaStream());
if (res != 0)
throw sd::cuda_exception::build("customOp execution failed", res);
for (auto v:context->fastpath_in()) {
if (!v->isEmpty())
v->syncToDevice();
}
for (auto v:context->fastpath_out()) {
if (!v->isEmpty())
v->syncToDevice();
}
return result;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
try {
auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
sd::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
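// executeStoredGraphT clones the registered graph's VariableSpace, injects the provided inputs at
// the given indices, executes the graph, and on success collects the requested outputs into a
// VariablesSet owned by the caller.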
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = sd::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<sd::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new sd::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
try {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) {
return set->size();
}
Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) {
return set->status();
}
sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) {
return set->at(i);
}
int getVariableId(sd::graph::Variable* variable) {
return variable->id();
}
int getVariableIndex(sd::graph::Variable* variable) {
return variable->index();
}
const char* getVariableName(sd::graph::Variable* variable) {
return variable->getName()->c_str();
}
Nd4jLong const* getVariableShape(sd::graph::Variable* variable) {
return variable->getNDArray()->shapeInfo();
}
void* getVariableBuffer(sd::graph::Variable* variable) {
return variable->getNDArray()->buffer();
}
int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
try {
sd::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void deleteCharArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<char *>(pointer);
delete[] ptr;
}
void deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
void deleteVariablesSet(sd::graph::VariablesSet* pointer) {
delete pointer;
}
void deleteShapeList(Nd4jPointer shapeList) {
sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList);
//list->destroy();
delete list;
}
const char* getAllOperations() {
return sd::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer getGraphState(Nd4jLong id) {
return (Nd4jPointer) new sd::graph::GraphState(id);
}
void deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<sd::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on operation (i.e. while or if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped below from the provided buffers
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->launchContext());
// copy the result from the VariableSpace into the caller-provided output buffer
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point the Graph and Node for the current op have been processed
return Status::OK();
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
try {
return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes,
numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deleteResultWrapper(Nd4jPointer ptr) {
// nothing to do here beyond releasing the wrapper
auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
delete p;
}
int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
try {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: FLOAT24 conversion not implemented, same as above
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//sd::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
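/*
 * Illustrative sketch only (not part of the exported API): a typical host-side call into the
 * convertTypes dispatcher above. The pointers are assumed to be valid device allocations and
 * `extras` the same extra-pointers array handed to the other exec* entry points.
 *
 *   // cast a device buffer of N floats to float16; routes to
 *   // sd::TypeCast::convertGenericCuda<float, float16>(extras, dSrc, N, dDst)
 *   convertTypes(extras, ND4J_FLOAT32, dSrc, N, ND4J_FLOAT16, dDst);
 */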
Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new sd::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_length;
}
char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_buffer;
}
void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<sd::utf8string*>(ptr));
}
///////////////////////////////////////////////////////////////////
template<typename T, typename I>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const void* vindexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
auto indexes = reinterpret_cast<const I*>(vindexes);
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
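// Note on scatterUpdateCuda above: each target sub-array is "owned" by exactly one block
// (blockIdx.x == xIndex % gridDim.x), so updates hitting the same sub-array are always applied
// by the same block, sequentially across loop iterations, and never race across blocks;
// sub-arrays whose length differs from the update row are silently skipped (arrLenX != arrLenY).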
template<typename T, typename I>
__host__ static void scatterUpdateCudaLauncher(const hipStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) {
hipLaunchKernelGGL(( scatterUpdateCuda<T, I>), dim3(512), dim3(256), MAX_NUM_THREADS, *stream, opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets,
void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets,
void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets,
void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets,
void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) {
try {
auto stream = reinterpret_cast<hipStream_t *>(extraPointers[1]);
auto type = ArrayOptions::dataType(hXShapeInfo);
auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher,
(stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
LIBND4J_TYPES, INDEXING_TYPES);
sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
try {
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
NDArray array(buffer, specialBuffer, shapeInfo, &lc);
sd::DebugHelper::retrieveDebugStatistics(p, &array);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
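// tryPointerKernel / tryPointer below act as a lightweight device-accessibility probe: the
// kernel reads the supplied buffer and prints an aggregate, so an invalid pointer surfaces
// as a stream-synchronization error instead of a silent fault later on.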
void __global__ tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int b;
if (tid < len)
atomicAdd(&b, buf[tid]);
__syncthreads();
if (threadIdx.x ==0 && blockIdx.x == 0)
printf("Pointer check complete: %i\n", b);
}
void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
try {
hipStream_t stream;
hipStreamCreate(&stream);
hipLaunchKernelGGL(tryPointerKernel, dim3(256), dim3(512), len + 64, stream, p, len);
auto e = hipStreamSynchronize(stream);
if (e != 0)
throw sd::cuda_exception::build("tryPointer failed", e);
hipStreamDestroy(stream);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
int dataTypeFromNpyHeader(void *header) {
return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header));
}
sd::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) {
try {
auto buffer = new ConstantDataBuffer();
*buffer = sd::ConstantShapeHelper::getInstance()->bufferForShapeInfo(
ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty));
return buffer;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
void deleteShapeBuffer(sd::ConstantDataBuffer* ptr) {
delete ptr;
}
void deleteTadPack(sd::TadPack* ptr) {
delete ptr;
}
bool isBlasVersionMatches(int major, int minor, int build) {
auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion;
if (!result) {
nd4j_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion, Environment::getInstance()->_blasPatchVersion, major, minor, build);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch");
}
return result;
}
sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) {
return sd::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) {
return sd::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
return sd::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype);
}
Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) {
return dbf->primary();
}
Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) {
return dbf->special();
}
Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) {
return dbf->length();
}
Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) {
return dbf->sizeOf();
}
sd::graph::Context* createGraphContext(int nodeId) {
return new sd::graph::Context(nodeId);
}
sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) {
return &ptr->randomGenerator();
}
void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) {
ptr->markInplace(reallyInplace);
}
void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) {
ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) {
ptr->setTArguments(arguments, numberOfArguments);
}
void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) {
ptr->setIArguments(arguments, numberOfArguments);
}
void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) {
ptr->setBArguments(arguments, numberOfArguments);
}
void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) {
std::vector<sd::DataType> dtypes(numberOfArguments);
for (int e = 0; e < numberOfArguments; e++)
dtypes[e] = (sd::DataType) arguments[e];
ptr->setDArguments(dtypes);
}
void deleteGraphContext(sd::graph::Context* ptr) {
delete ptr;
}
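/*
 * Illustrative lifecycle sketch only (caller-side; buffers, shape infos and the execution
 * stream are assumed to be prepared elsewhere):
 *
 *   auto ctx = createGraphContext(0);                                   // node id 0
 *   setGraphContextCudaContext(ctx, stream, reductionPtr, allocationPtr);
 *   setGraphContextInputBuffer(ctx, 0, inBuf, inShapeHost, inShapeDev);
 *   setGraphContextOutputBuffer(ctx, 0, outBuf, outShapeHost, outShapeDev);
 *   // ... execute a custom op against ctx ...
 *   deleteGraphContext(ctx);
 */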
sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) {
try {
return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) {
return ptr->rootState();
}
Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) {
return ptr->nodeState();
}
void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) {
ptr->setStates(rootSeed, nodeSeed);
}
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeInt(index);
}
Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeLong(index);
}
void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) {
delete ptr;
}
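/*
 * Illustrative sketch only: host-side use of the RandomGenerator wrappers above.
 *
 *   auto rng = createRandomGenerator(119, 17);               // root seed, node seed
 *   auto i   = getRandomGeneratorRelativeInt(rng, 5);        // deterministic per index
 *   auto l   = getRandomGeneratorRelativeLong(rng, 5);
 *   setRandomGeneratorStates(rng, 119, 18);                  // re-seed in place
 *   deleteRandomGenerator(rng);
 */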
Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) {
try {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int shapeSize = arr.shape.size();
std::vector<Nd4jLong> shape(shapeSize);
bool _empty = false;
for (unsigned int i = 0; i < shapeSize; i++) {
shape[i] = arr.shape[i];
if (arr.shape[i] == 0)
_empty = true;
}
auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
Nd4jLong *shapeBuffer;
if (shape.size() == 1 && shape[0] == 0) {
// scalar case
shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
} else if (_empty) {
if (shapeSize > 0)
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
else
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
} else {
shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
}
return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true)); // TO DO: this can lead to unpleasant crash sometimes
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runLightBenchmarkSuit(bool printOut) {
try {
sd::LightBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runFullBenchmarkSuit(bool printOut) {
try {
sd::FullBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getCachedMemory(int deviceId) {
return sd::ConstantHelper::getInstance()->getCachedAmount(deviceId);
}
sd::LaunchContext* defaultLaunchContext() {
return LaunchContext::defaultContext();
}
Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) {
return lc->getScalarPointer();
}
Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) {
return lc->getReductionPointer();
}
Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) {
return lc->getAllocationPointer();
}
Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) {
return lc->getCudaStream();
}
Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) {
return lc->getCudaSpecialStream();
}
Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) {
return lc->getCublasHandle();
}
Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) {
return lc->getCusolverHandle();
}
int lastErrorCode() {
return sd::LaunchContext::defaultContext()->errorReference()->errorCode();
}
const char* lastErrorMessage() {
return sd::LaunchContext::defaultContext()->errorReference()->errorMessage();
}
void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) {
ptr->setShapeFunctionOverride(reallyOverride);
}
void ctxPurge(OpaqueContext* ptr) {
ptr->clearFastPath();
}
int binaryLevel() {
return 0;
}
int optimalLevel() {
return 0;
}
bool isMinimalRequirementsMet() {
return true;
}
bool isOptimalRequirementsMet() {
return true;
}
void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) {
ptr->allowHelpers(reallyAllow);
}
void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) {
if (execMode < 0 || execMode > 2)
execMode = 0;
ptr->setExecutionMode((samediff::ExecutionMode) execMode);
}
OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) {
auto buffer = dbAllocateDataBuffer(0, dataType, false);
if (primary != nullptr)
buffer->setPrimary(primary, elements);
if (special != nullptr)
buffer->setSpecial(special, elements);
return buffer;
}
OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
return allocateDataBuffer(elements, dataType, allocateBoth);
}
OpaqueDataBuffer* allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
try {
auto dtype = DataTypeUtils::fromInt(dataType);
return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->primary();
}
Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->special();
}
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) {
delete dataBuffer;
}
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) {
dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) {
dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocatePrimary();
}
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocateSpecial();
}
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
try {
dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) {
return new InteropDataBuffer(*dataBuffer, length, offset);
}
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToSpecial();
}
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToPrimary(nullptr);
}
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readPrimary();
}
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writePrimary();
}
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readSpecial();
}
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writeSpecial();
}
void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
dataBuffer->expand(elements);
}
void dbClose(OpaqueDataBuffer *dataBuffer) {
dataBuffer->getDataBuffer()->close();
}
int dbDeviceId(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->deviceId();
}
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) {
dataBuffer->setDeviceId(deviceId);
}
int dbLocality(OpaqueDataBuffer *dataBuffer) {
auto p = dataBuffer->dataBuffer()->isPrimaryActual();
auto d = dataBuffer->dataBuffer()->isSpecialActual();
if (p && d)
return 0;
else if (p)
return -1;
else
return 1;
}
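/*
 * Illustrative sketch only: a typical OpaqueDataBuffer lifecycle using the wrappers above
 * (the element count and data type are arbitrary examples).
 *
 *   auto db = dbAllocateDataBuffer(1024, (int) sd::DataType::FLOAT32, true); // host + device
 *   // ... fill dbPrimaryBuffer(db) on host ...
 *   dbTickHostWrite(db);       // mark the host copy as most recent
 *   dbSyncToSpecial(db);       // push to device before a kernel consumes it
 *   // ... launch kernels against dbSpecialBuffer(db) ..., then:
 *   dbTickDeviceWrite(db);
 *   dbSyncToPrimary(db);       // pull results back to host
 *   deleteDataBuffer(db);
 */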
|
b2997e06f91beaa5b95b836dcd380e7549ab32eb.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
#include <legacy/NativeOpExecutioner.h>
#include <legacy/NativeOps.h>
#include <cuda.h>
#include <system/buffer.h>
#include <loops/transform_any.h>
#include <loops/reduce_bool.h>
#include <loops/reduce_long.h>
#include <loops/scalar.h>
#include <helpers/threshold.h>
#include <ops/specials_cuda.h>
#include <helpers/DebugHelper.h>
#include <execution/AffinityManager.h>
#include <exceptions/datatype_exception.h>
#include <exceptions/cuda_exception.h>
#include <helpers/CudaLaunchHelper.h>
#include <graph/GraphExecutioner.h>
#include <helpers/BlasHelper.h>
#include <graph/GraphHolder.h>
#include <ops/declarable/CustomOperations.h>
#include <helpers/PointersManager.h>
//#include <sys/time.h>
#include <curand.h>
#include <graph/Status.h>
#include <helpers/DebugHelper.h>
using namespace sd;
#include <loops/special_kernels.h>
#include <performance/benchmarking/FullBenchmarkSuit.h>
#include <performance/benchmarking/LightBenchmarkSuit.h>
cudaDeviceProp *deviceProperties;
cudaFuncAttributes *funcAttributes = new cudaFuncAttributes[64];
int blockLimit = 128;
int maxThreads = 512;
bool allowedP2P = false;
bool supportedP2P = false;
#ifdef __ND4J_EXPERIMENTAL__
bool experimentalSupport = true;
#else
bool experimentalSupport = false;
#endif
int minThreads = 32;
__constant__ char deviceConstantMemory[49152];
// this method just does type conversion in a fancy way
int getDeviceId(Nd4jPointer ptrToDeviceId) {
return (int)(Nd4jLong)ptrToDeviceId;
}
/*
* Basic CUDA constants here: number of blocks per MP
*/
int getDeviceBlockThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
int blockThreshold = 8;
if (ccMajor >= 5)
blockThreshold = 32;
else if (ccMajor == 3)
blockThreshold = 16;
else if (ccMajor < 3)
blockThreshold = 8;
return blockThreshold;
}
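// Example: a compute capability 7.x or 8.x device falls into the ccMajor >= 5 branch above,
// so the occupancy heuristic assumes up to 32 resident blocks per multiprocessor.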
/*
* This method returns the shared memory threshold value. Default overflow ratio is 0.3
*/
int getDeviceSharedThreshold(int deviceId) {
int ccMinor = deviceProperties[deviceId].minor;
int ccMajor = deviceProperties[deviceId].major;
// please note the threshold isn't a multiple of 32, and that's NOT a mistake
int shmemThreshold;
if (ccMajor == 6 && ccMinor == 0)
shmemThreshold = 65536;
else if (ccMajor == 6 && ccMinor == 1)
shmemThreshold = 49152;
else if (ccMajor == 5 && ccMinor == 2)
shmemThreshold = 98304;
else if (ccMajor == 5)
shmemThreshold = 65536;
else if (ccMajor == 3 && ccMinor == 7)
shmemThreshold = 114688;
else shmemThreshold = 49152;
return shmemThreshold / 0.3;
}
sd::buffer::Buffer<Nd4jLong> * createScalarBuffer(cudaStream_t stream) {
auto scalarShapeInfo = shape::createScalarShapeInfo();
auto buff = sd::buffer::createBuffer(scalarShapeInfo,shape::shapeInfoLength(2), stream);
sd::buffer::copyDataToGpu(&buff, stream);
return buff;
}
class ScalarShapeInformation {
private:
sd::buffer::Buffer<Nd4jLong> *scalarDimension;
sd::buffer::Buffer<Nd4jLong> *scalarShapeInfo;
// std::thread::id threadId;
public:
ScalarShapeInformation(cudaStream_t stream) {
auto scalarDimensionBuff = reinterpret_cast<Nd4jLong *>(malloc(sizeof(Nd4jLong)));
CHECK_ALLOC(scalarDimensionBuff, "Failed to allocate ShapeInfoBuffer", sizeof(Nd4jLong));
scalarDimensionBuff[0] = MAX_DIMENSION;
scalarDimension = sd::buffer::createBuffer(scalarDimensionBuff,1, stream);
scalarShapeInfo = createScalarBuffer(stream);
// threadId = std::this_thread::get_id();
}
~ScalarShapeInformation() {
sd::buffer::freeBuffer(&scalarShapeInfo);
sd::buffer::freeBuffer(&scalarDimension);
}
Nd4jLong *getShapeInfoHostPointer() {
return scalarShapeInfo->data;
}
Nd4jLong * getShapeInfoGpuPointer() {
return scalarShapeInfo->gData;
}
Nd4jLong * getDimensionHostPointer() {
return scalarDimension->data;
}
Nd4jLong * getDimensionGpuPointer() {
return scalarDimension->gData;
}
};
template <typename T>
class ScalarInfo {
sd::buffer::Buffer<T> *scalarData;
ScalarShapeInformation *shapeInfo;
T finalResult;
cudaStream_t streamRef;
public:
ScalarInfo(cudaStream_t stream) {
T *scalarResult = reinterpret_cast<T*>(malloc(sizeof(T)));
CHECK_ALLOC(scalarResult, "Failed to allocate new scalar buffer", sizeof(T));
shapeInfo = new ScalarShapeInformation(stream);
scalarData = sd::buffer::createBuffer(scalarResult,1, stream);
streamRef = stream;
sd::buffer::copyDataToGpu(&scalarData, stream);
}
T getFinalResultFromDevice() {
sd::buffer::copyDataFromGpu(&scalarData, streamRef);
return scalarData->data[0];
}
/**
* Get the device shape information
* representing a scalar
*/
Nd4jLong *getDeviceShapeInfo() {
return shapeInfo->getShapeInfoGpuPointer();
}
/**
* Get the dZ pointers
*/
T *getDevicePointer() {
return scalarData->gData;
}
/**
* Get the device pointer for the scalar dimension (MAX_DIMENSION)
*/
Nd4jLong *getDimensionDevicePointer() {
return shapeInfo->getDimensionGpuPointer();
}
~ScalarInfo() {
sd::buffer::freeBuffer(&scalarData);
delete shapeInfo;
}
};
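// ScalarInfo above bundles a single-element device buffer with the scalar shape descriptors,
// so legacy reduce-to-scalar paths can stage their result on device and read it back through
// getFinalResultFromDevice().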
void execPairwiseTransform( Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseTransform(&lc, opNum, dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execPairwiseTransformBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execPairwiseBoolTransform(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStatsScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execBroadcastBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcastBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param dY
* @param dYShapeInfo
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
void execBroadcast(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers[11]);
auto tadOnlyShapeInfoZ = reinterpret_cast<Nd4jLong *>(extraPointers[12]);
auto tadOffsetsZ = reinterpret_cast<Nd4jLong *>(extraPointers[13]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hYShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execBroadcast(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, tadOnlyShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloatScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSameScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceSame2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceLong(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceLong(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::INT64)
throw datatype_exception::build("execReduceLong wrong Z data type", sd::DataType::INT64, zType);
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceLongFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, LONG_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceLong(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const*hXShapeInfo, Nd4jLong const*dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const*hZShapeInfo, Nd4jLong const*dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const*hDimensionShape, Nd4jLong const*dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduceBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto hTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[9]);
auto dTADShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers[10]);
auto reductionPointer = reinterpret_cast<void *>(extraPointers[4]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (zType != sd::DataType::BOOL)
throw std::runtime_error("execReduceBool requires Z operand to have BOOL type");
auto xLength = shape::length(hXShapeInfo);
auto blockWidth = 256;
auto numBlocks = CudaLaunchHelper::getReductionBlocks(xLength, blockWidth);
dim3 launchDims(numBlocks, blockWidth, 32768);
BUILD_DOUBLE_SELECTOR(xType, zType, functions::reduce::ReduceBoolFunction,
::execReduceScalar(launchDims, stream, opNum,
dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), hXShapeInfo,
extraParams,
dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), hZShapeInfo,
nullptr, 0, reductionPointer, dTADShapeInfo), LIBND4J_TYPES, BOOL_TYPES);
sd::DebugHelper::checkErrorCode(stream, "execReduceBool(...) failed");
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
* @param dimension
* @param dimensionLength
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduce(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduce(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
(int *) dbDimension->special(), dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
* @param dZ
* @param dZShapeInfo
*/
////////////////////////////////////////////////////////////////////////
void execReduceFloat2(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduceFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadPack.specialShapeInfo(), tadPack.specialOffsets());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
*
* @param opNum
* @param dX
* @param dXShapeInfo
* @param extraParams
*/
////////////////////////////////////////////////////////////////////////
void execIndexReduceScalar(
Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo){
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execIndexReduceScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformSame(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformSame(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformBool(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[0] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[1] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformAny(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto streamSpecial = reinterpret_cast<cudaStream_t &>(extraPointers[4]);
LaunchContext lc(stream, streamSpecial, extraPointers[5], extraPointers[3],
reinterpret_cast<int *>(extraPointers[6]));
NativeOpExecutioner::execTransformAny(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
nullptr, nullptr);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformStrict(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformStrict(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execTransformFloat(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
auto tadShapeInfo = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[10] : nullptr);
auto tadOffsets = reinterpret_cast<Nd4jLong *>(extraPointers != nullptr ? extraPointers[11] : nullptr);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execTransformFloat(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
tadShapeInfo, tadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void checkP2P() {
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
bool tempSupport = true;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (!canAccess) {
tempSupport = false;
break;
}
}
}
supportedP2P = tempSupport;
cudaSetDevice(curDevice);
} else {
// if we have only 1 device - we say that we support P2P, since all data will be on 1 device
supportedP2P = true;
}
}
void enableP2P(bool enable) {
if (enable == allowedP2P)
return;
int curDevice = 0;
cudaGetDevice(&curDevice);
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
    if (curDevice < 0 || curDevice > devCnt)
curDevice = 0;
if (devCnt > 1) {
for (int dX = 0; dX < devCnt; dX++) {
for (int dY = 0; dY < devCnt; dY++) {
if (dX == dY)
continue;
int canAccess = 0;
cudaSetDevice(dX);
cudaDeviceCanAccessPeer(&canAccess, dX , dY);
if (canAccess) {
if (enable) {
cudaDeviceEnablePeerAccess(dY, 0);
} else {
cudaDeviceDisablePeerAccess(dY);
}
} else {
if (sd::Environment::getInstance()->isVerbose()) printf("Peer access [%i] -> [%i] isn't possible\n", dX, dY);
}
}
}
cudaSetDevice(curDevice);
}
allowedP2P = enable;
cudaSetDevice(curDevice);
}
bool isP2PAvailable() {
return supportedP2P;
}
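// Usage sketch (illustrative only; the caller below is hypothetical and kept commented out):
// the intended order is to probe peer access via checkP2P() first, then toggle it with
// enableP2P(), and only afterwards rely on isP2PAvailable().
/*
static void exampleP2PSetup() {
    checkP2P();                  // fills supportedP2P based on cudaDeviceCanAccessPeer
    if (isP2PAvailable())
        enableP2P(true);         // enables peer access pairwise across all visible devices
}
*/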
void initializeDevicesAndFunctions() {
try {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
deviceProperties = new cudaDeviceProp[devCnt];
for (int i = 0; i < devCnt; i++) {
cudaSetDevice(i);
cudaGetDeviceProperties(&deviceProperties[i], i);
cudaDeviceSetLimit(cudaLimitStackSize, 4096);
}
cudaSetDevice(0);
checkP2P();
// enabling p2p gpu access if it's supported
if (supportedP2P && devCnt > 1)
enableP2P(allowedP2P);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void initializeFunctions(Nd4jPointer *functions) {
sd::BlasHelper::getInstance()->initializeDeviceFunctions(functions);
/*
cublasSgemv = (CublasSgemv)functions[0];
cublasDgemv = (CublasDgemv)functions[1];
cublasHgemm = (CublasHgemm)functions[2];
cublasSgemm = (CublasSgemm)functions[3];
cublasDgemm = (CublasDgemm)functions[4];
cublasSgemmEx = (CublasSgemmEx)functions[5];
cublasHgemmBatched = (CublasHgemmBatched)functions[6];
cublasSgemmBatched = (CublasSgemmBatched)functions[7];
cublasDgemmBatched = (CublasDgemmBatched)functions[8];
*/
}
/**
 * This method acquires a memory chunk of requested size in pinned host memory
 *
 * @param memorySize memory size, in bytes
 * @param flags optional parameter
 */
Nd4jPointer mallocHost(Nd4jLong memorySize, int flags) {
Nd4jPointer pointer;
// cudaHostAllocMapped |cudaHostAllocPortable
auto res = cudaHostAlloc(reinterpret_cast<void **>(&pointer), memorySize + 8, cudaHostAllocDefault);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaHostAlloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
 * This method acquires a memory chunk of requested size on the specified device
 *
 * @param memorySize memory size, in bytes
 * @param deviceId target device id. For CUDA that's just an int, for OpenCL that's a pointer to a device_id, etc.
 * @param flags optional parameter
 */
Nd4jPointer mallocDevice(Nd4jLong memorySize, int deviceId, int flags) {
Nd4jPointer pointer;
auto res = cudaMalloc(reinterpret_cast<void **>(&pointer), memorySize + 8);
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMalloc failed");
}
return reinterpret_cast<int8_t*>(pointer);
}
/**
* This method releases previously allocated host memory space
*
* @param pointer pointer that'll be freed
*/
int freeHost(Nd4jPointer pointer) {
auto res = cudaFreeHost(reinterpret_cast<void *>(pointer));
if (res != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFreeHost failed");
}
return 1L;
}
/**
 * This method releases previously allocated memory space on the device
 *
 * @param pointer pointer that'll be freed
 * @param deviceId id of the device the memory was allocated on
 */
int freeDevice(Nd4jPointer pointer, int deviceId) {
auto res = cudaFree(reinterpret_cast<void *>(pointer));
    // we're intentionally skipping error code 1 (cudaErrorInvalidValue) here
if (res != 0 && res != 1) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(res);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaFree failed");
}
return res == 0 ? 1L : 0L;
}
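// Usage sketch (illustrative only; hypothetical caller, kept commented out): pairing the host and
// device allocators with their matching free calls. Both allocators over-allocate by 8 bytes and
// report failures through the LaunchContext error reference instead of throwing.
/*
static void exampleAllocRoundTrip() {
    Nd4jPointer host = mallocHost(1024, 0);        // pinned host memory via cudaHostAlloc
    Nd4jPointer dev  = mallocDevice(1024, 0, 0);   // device memory on device 0 via cudaMalloc
    if (host != nullptr) freeHost(host);
    if (dev  != nullptr) freeDevice(dev, 0);
}
*/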
Nd4jPointer createContext() {
return 0L;
}
Nd4jPointer createStream() {
auto stream = new cudaStream_t();
auto dZ = cudaStreamCreate(stream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamCreate failed");
}
return stream;
}
Nd4jPointer createEvent() {
Nd4jPointer nativeEvent= (Nd4jPointer) malloc(sizeof(cudaEvent_t));
CHECK_ALLOC(nativeEvent, "Failed to allocate new CUDA event buffer", sizeof(cudaEvent_t));
auto dZ = cudaEventCreateWithFlags(reinterpret_cast<cudaEvent_t *>(&nativeEvent), cudaEventDisableTiming);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventCreateWithFlags failed");
}
return nativeEvent;
}
int registerEvent(Nd4jPointer event, Nd4jPointer stream) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto pStream = reinterpret_cast<cudaStream_t *>(stream);
auto dZ = cudaEventRecord(*pEvent, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventRecord failed");
}
return 1;
}
int setDevice(int deviceId) {
AffinityManager::setCurrentDevice(deviceId);
return 1;
}
Nd4jLong getDeviceFreeMemoryDefault() {
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceFreeMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memFree;
}
Nd4jLong getDeviceTotalMemory(int device) {
int orig = -1;
cudaGetDevice(&orig);
if (device >= 0 && device != orig) {
cudaSetDevice(device);
}
size_t memFree = 0;
size_t memTotal = 0;
cudaMemGetInfo(&memFree, &memTotal);
if (device >= 0 && device != orig) {
cudaSetDevice(orig);
}
return (Nd4jLong) memTotal;
}
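// Usage sketch (illustrative only; hypothetical caller, kept commented out): querying free/total
// memory either on the current device or on an explicit device id; the per-device variants
// temporarily switch devices and switch back.
/*
static void exampleMemoryQuery() {
    Nd4jLong freeNow    = getDeviceFreeMemoryDefault();    // current device
    Nd4jLong freeOnDev  = getDeviceFreeMemory(0);           // explicit device 0
    Nd4jLong totalOnDev = getDeviceTotalMemory(0);
    printf("device 0: %lld / %lld bytes free\n",
           static_cast<long long>(freeOnDev), static_cast<long long>(totalOnDev));
    (void) freeNow;
}
*/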
int memcpySync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaMemcpyKind kind;
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
return 0;
}
}
auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpy failed");
return 0;
}
return 1;
}
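// Usage sketch (illustrative only; hypothetical caller, kept commented out): the integer `flags`
// argument selects the cudaMemcpyKind: 0 = host->host, 1 = host->device, 2 = device->host,
// 3 = device->device.
/*
static void exampleMemcpyFlags() {
    Nd4jPointer host = mallocHost(256, 0);
    Nd4jPointer dev  = mallocDevice(256, 0, 0);
    memcpySync(dev, host, 256, 1, nullptr);    // host -> device
    memcpySync(host, dev, 256, 2, nullptr);    // device -> host
    freeDevice(dev, 0);
    freeHost(host);
}
*/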
int memcpyAsync(Nd4jPointer dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
cudaMemcpyKind kind;
//sd::DebugHelper::checkErrorCode(pStream, "Preliminary sync failed");
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
case 2: {
kind = cudaMemcpyDeviceToHost;
}
break;
case 3: {
kind = cudaMemcpyDeviceToDevice;
}
break;
default: {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFNED MEMCPY");
return 0;
}
}
auto dZ = cudaMemcpyAsync(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind, *pStream);
//auto dZ = cudaMemcpy(reinterpret_cast<void *>(dst), const_cast<const void *>(reinterpret_cast<void *>(src)), static_cast<size_t>(size), kind);
if (dZ != 0) {
printf("Failed on [%p] -> [%p], size: [%i], direction: [%i], dZ: [%i]\n", src, dst, size, flags, static_cast<int>(dZ));
fflush(stdout);
fflush(stderr);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyAsync failed");
return 0;
}
return 1;
}
int memsetSync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto dZ = cudaMemset(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size));
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemset failed");
}
return 1;
}
int memsetAsync(Nd4jPointer dst, int value, Nd4jLong size, int flags, Nd4jPointer reserved) {
auto pStream = reinterpret_cast<cudaStream_t *>(reserved);
auto dZ = cudaMemsetAsync(reinterpret_cast<void *>(dst), value, static_cast<size_t>(size), *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemsetAsync failed");
}
return 1;
}
int destroyEvent(Nd4jPointer event) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto dZ = cudaEventDestroy(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventDestroy failed");
}
return 1;
}
int streamSynchronize(Nd4jPointer stream) {
auto pStream = reinterpret_cast<cudaStream_t *>(stream);
auto dZ = cudaStreamSynchronize(*pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaStreamSynchronize failed");
}
return 1L;
}
int eventSynchronize(Nd4jPointer event) {
auto pEvent = reinterpret_cast<cudaEvent_t *>(&event);
auto dZ = cudaEventSynchronize(*pEvent);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaEventSynchronize failed");
}
return 1L;
}
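// Usage sketch (illustrative only; hypothetical caller, kept commented out): the expected stream /
// event lifecycle is create stream, create event, record the event on the stream, wait on it,
// then destroy the event.
/*
static void exampleStreamEventLifecycle() {
    Nd4jPointer stream = createStream();     // heap-allocated cudaStream_t
    Nd4jPointer event  = createEvent();      // cudaEvent_t created with timing disabled
    registerEvent(event, stream);            // cudaEventRecord on that stream
    eventSynchronize(event);                 // blocks until the recorded work completes
    streamSynchronize(stream);
    destroyEvent(event);
}
*/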
int getAvailableDevices() {
int devCnt = 0;
cudaGetDeviceCount(&devCnt);
return devCnt;
}
void enableDebugMode(bool reallyEnable) {
sd::Environment::getInstance()->setDebug(reallyEnable);
}
void setGridLimit(int gridSize) {
if (gridSize > 8192)
gridSize = 8192;
if (gridSize < 1)
gridSize = 1;
blockLimit = gridSize;
}
int ompGetMaxThreads() {
return maxThreads;
}
int ompGetNumThreads() {
return maxThreads;
}
void setOmpNumThreads(int threads) {
if (threads > 1024)
threads = 1024;
if (threads < 32)
threads = 32;
maxThreads = threads;
}
void enableVerboseMode(bool reallyEnable) {
sd::Environment::getInstance()->setVerbose(reallyEnable);
}
int getDeviceMajor(int device) {
return deviceProperties[device].major;
}
int getDeviceMinor(int device) {
return deviceProperties[device].minor;
}
const char * getDeviceName(int device) {
return deviceProperties[device].name;
}
void specialConcat(
Nd4jPointer *extraPointers,
int dimension,
int numArrays,
Nd4jPointer *data,
Nd4jPointer *inputShapeInfo,
void *dZ,
Nd4jLong const* dZShapeInfo, Nd4jPointer *tadPointers, Nd4jPointer *offsetPointers) {
try {
BUILD_SINGLE_SELECTOR(ArrayOptions::dataType(dZShapeInfo), sd::SpecialMethods,
::concatCpuGeneric(dimension, numArrays, data, inputShapeInfo, dZ, dZShapeInfo),
LIBND4J_TYPES);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
/**
 * This method builds a TAD (tensor-along-dimension) pack for the given shape and dimensions
 */
sd::TadPack* tadOnlyShapeInfo(Nd4jLong const* dXShapeInfo, int *dimension, int dimensionLength) {
try {
auto pack = new TadPack();
*pack = sd::ConstantTadHelper::getInstance()->tadForDimensions(dXShapeInfo, dimension, dimensionLength);
return pack;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong const* getPrimaryShapeInfo(sd::TadPack* pack) {
return pack->primaryShapeInfo();
}
Nd4jLong const* getPrimaryOffsets(sd::TadPack* pack) {
return pack->primaryOffsets();
}
Nd4jLong const* getSpecialShapeInfo(sd::TadPack* pack) {
return pack->specialShapeInfo();
}
Nd4jLong const* getSpecialOffsets(sd::TadPack* pack) {
return pack->specialOffsets();
}
Nd4jLong getNumberOfTads(sd::TadPack* pack) {
return pack->numberOfTads();
}
int getShapeInfoLength(sd::TadPack* pack) {
return pack->shapeInfoLength();
}
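// Usage sketch (illustrative only; hypothetical shapes, kept commented out): building a TAD pack
// for dimension 1 of a 2D array and reading its host-side views through the accessors above.
/*
static void exampleTadPack(Nd4jLong const* hXShapeInfo) {
    int dims[] = {1};
    auto pack = tadOnlyShapeInfo(hXShapeInfo, dims, 1);
    if (pack != nullptr) {
        auto numTads    = getNumberOfTads(pack);
        auto tadShape   = getPrimaryShapeInfo(pack);
        auto tadOffsets = getPrimaryOffsets(pack);
        (void) numTads; (void) tadShape; (void) tadOffsets;
        delete pack;    // tadOnlyShapeInfo returns an owning pointer
    }
}
*/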
int memcpyConstantAsync(Nd4jLong dst, Nd4jPointer src, Nd4jLong size, int flags, Nd4jPointer reserved) {
cudaStream_t *pStream = reinterpret_cast<cudaStream_t *>(reserved);
cudaMemcpyKind kind;
DEBUG_KERNEL(pStream, -1);
switch (flags) {
case 0: {
kind = cudaMemcpyHostToHost;
}
break;
case 1: {
kind = cudaMemcpyHostToDevice;
}
break;
        case 2: {
                    kind = cudaMemcpyDeviceToHost;
                }
            break;
        case 3: {
                    kind = cudaMemcpyDeviceToDevice;
                }
            break;
        default: {
                    sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
                    sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("UNDEFINED MEMCPY");
                    return 0;
                }
    }
auto dZ = cudaMemcpyToSymbolAsync(deviceConstantMemory, const_cast<const void *>(src), size, dst, kind, *pStream);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaMemcpyToSymbolAsync failed");
}
return 1;
}
Nd4jPointer getConstantSpace() {
Nd4jPointer dConstAddr;
cudaError_t dZ = cudaGetSymbolAddress(reinterpret_cast<void **>(&dConstAddr), deviceConstantMemory);
if (dZ != 0) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(dZ);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("cudaGetSymbolAddress failed");
}
return dConstAddr;
}
void pullRows(Nd4jPointer *extraPointers,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* zShapeInfo, Nd4jLong const* dZShapeInfo,
Nd4jLong n,
Nd4jLong *indexes,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
Nd4jLong const* zTadShapeInfo,
Nd4jLong const* zTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
dim3 launchDims(64, 256, 1024);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, pullRowsKernelGeneric,
(launchDims, stream, dbX->special(), dbZ->special(), n, indexes, tadShapeInfo, tadOffsets, zTadShapeInfo, zTadOffsets),
LIBND4J_TYPES);
DEBUG_KERNEL(stream, -1);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void average(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length,
bool propagate) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance()->isDebugAndVerbose())
printf("averageFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(256, 256, 4096);
BUILD_SINGLE_SELECTOR(xType, averagingKernelGeneric, (launchDims, stream, dX, dz, n, length, propagate),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AverageFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::averageGeneric(x, z, zShapeInfo, n, length, propagate),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void accumulate(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jLong const* xShapeInfo,
Nd4jPointer *dx, Nd4jLong const* dXShapeInfo,
void *z, Nd4jLong const* zShapeInfo,
void *dz, Nd4jLong const* dzShapeInfo,
int n,
Nd4jLong length) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
int mode = getDeviceId(extras[3]);
auto dX = reinterpret_cast<void **>(dx);
if (sd::Environment::getInstance()->isDebugAndVerbose())
printf("accumulateFloat called\n");
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// launching on gpu
if (mode == 0) {
dim3 launchDims(n, 256, 16384);
BUILD_SINGLE_SELECTOR(xType, accumulateKernelGeneric, (launchDims, stream, dX, dz, n, length),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "AccumulateFloat(...) failed");
} else {
// launching on host memory
BUILD_SINGLE_SELECTOR(xType, sd::SpecialMethods, ::accumulateGeneric(x, z, zShapeInfo, n, length),
LIBND4J_TYPES);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void shuffle(Nd4jPointer *extras,
Nd4jPointer *x, Nd4jPointer *xShapeInfo,
Nd4jPointer *dx, Nd4jPointer *dXShapeInfo,
Nd4jPointer *z, Nd4jPointer *zShapeInfo,
Nd4jPointer *dz, Nd4jPointer *dZShapeInfo,
int N,
int *shuffleMap,
Nd4jPointer *tadShapeInfo,
Nd4jPointer *tadOffsets) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
auto dX = reinterpret_cast<void **>(dx);
auto dZ = reinterpret_cast<void **>(dz);
auto xShape = reinterpret_cast<Nd4jLong**>(xShapeInfo);
auto dxShape = reinterpret_cast<Nd4jLong**>(dXShapeInfo);
auto tadOnlyShapeInfo = reinterpret_cast<Nd4jLong **>(tadShapeInfo);
auto tadOffset = reinterpret_cast<Nd4jLong **>(tadOffsets);
auto xType = sd::ArrayOptions::dataType(xShape[0]);
dim3 launchDims(256, 512, 8192);
BUILD_SINGLE_SELECTOR(xType, shuffleKernelGeneric,
(launchDims, stream, dX, dxShape, dZ, N, shuffleMap, tadOnlyShapeInfo, tadOffset),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "shuffle(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
bool isExperimentalEnabled() {
return sd::Environment::getInstance()->isExperimentalBuild();
}
void setOmpMinThreads(int threads) {
minThreads = sd::math::nd4j_max<int>(32, threads);
minThreads = sd::math::nd4j_min<int>(maxThreads, minThreads);
}
int getDevice() {
return sd::AffinityManager::currentDeviceId();
}
void setElementThreshold(int num) {
// this is no-op for CUDA
}
void setTADThreshold(int num) {
// this is no-op for CUDA
}
////////////////////////////////////////////////////////////////////////
void execSummaryStats(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
bool biasCorrected) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execSummaryStatsTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
bool biasCorrected,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execSummaryStats(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
tadShapeInfo, tadOffsets,
biasCorrected);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbDimension});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Tad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadOnlyShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* yTadOnlyShapeInfo, Nd4jLong const* yTadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(hXShapeInfo,
dimension,
shape::length(hDimensionShape));
auto tadLength = shape::length(tadPack.primaryShapeInfo());
auto yLength = shape::length(hYShapeInfo);
auto xLength = shape::length(hXShapeInfo);
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
if (tadLength == yLength || tadLength == xLength) {
// nd4j_printf("== way\n","");
NativeOpExecutioner::execReduce3(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
} else
NativeOpExecutioner::execReduce3TAD(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
                                                tadOnlyShapeInfo, tadOffsets, yTadOnlyShapeInfo, yTadOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execReduce3Scalar(Nd4jPointer *extraPointers,int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3Scalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>());
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBool(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarBoolTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalarBool(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParams,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalars->primary(), hScalarShapeInfo, dbScalars->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
dimension, dimensionLength,
tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalar(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalar, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalar});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execScalar(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
dbScalar->primary(), hScalarShapeInfo, dbScalar->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hScalarShapeInfo).specialAsT<Nd4jLong>(),
extraParams);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalar});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execScalarTad(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbScalars, Nd4jLong const* hScalarShapeInfo, Nd4jLong const* dScalarShapeInfo,
void *extraParams,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* tadShapeInfo, Nd4jLong const* tadOffsets,
Nd4jLong const* tadShapeInfoZ, Nd4jLong const* tadOffsetsZ) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbScalars});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xType = sd::ArrayOptions::dataType(hXShapeInfo);
auto yType = sd::ArrayOptions::dataType(hScalarShapeInfo);
auto zType = sd::ArrayOptions::dataType(hZShapeInfo);
if (yType != xType && yType != sd::DataType::BOOL && !isExperimentalEnabled())
throw sd::datatype_exception::build("execScalar both operands must have same data type", xType, yType);
dim3 launchDims(256, 256, 16384);
#ifdef __ND4J_EXPERIMENTAL__
BUILD_PAIRWISE_SELECTOR(xType, yType, zType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dX, dXShapeInfo, dZ, dZShapeInfo, dScalars, extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES, LIBND4J_TYPES);
#else
BUILD_SINGLE_SELECTOR_THRICE(xType, functions::scalar::ScalarTransform, ::executeCudaAlongDimension(launchDims, stream, opNum, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(), dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(), dbScalars->special(), extraParams, dimension, dimensionLength, tadShapeInfo, tadOffsets, tadShapeInfoZ, tadOffsetsZ), LIBND4J_TYPES);
#endif
DEBUG_KERNEL(stream, opNum);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbScalars});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
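// Minimal sketch of the operand-type rule enforced above (hypothetical helper, kept commented
// out): the scalar's type must match the input type or be BOOL, unless the experimental pairwise
// build is enabled.
/*
static bool exampleScalarTypesCompatible(sd::DataType xType, sd::DataType yType) {
    return yType == xType || yType == sd::DataType::BOOL || isExperimentalEnabled();
}
*/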
void execAggregate(Nd4jPointer *extraPointers,
int opNum,
void **arguments,
int numArguments,
Nd4jLong **shapes,
int numShapes,
int *indexArguments,
int numIndexArguments,
int **intArrays,
int numIntArrays,
void *realArguments,
int numRealArguments,
sd::DataType dtype) {
}
void batchExecutor(Nd4jPointer *extraPointers,
int numAggregates,
int opNum,
int maxArgs,
int maxShapes,
int maxIntArrays,
int maxIntArraySize,
int maxIdx,
int maxReals,
void *ptrToArguments,
sd::DataType dtype) {
}
void execAggregateBatch(Nd4jPointer *extraPointers,
int numAggregates, int opNum,
int maxArgs, int maxShapes,
int maxIntArrays, int maxIntArraySize,
int maxIdx, int maxReals,
void *ptrToArguments, sd::DataType dtype) {
}
////////////////////////////////////////////////////////////////////////
void execRandom(Nd4jPointer *extraPointers,
int opNum,
Nd4jPointer stateHost,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom2(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
////////////////////////////////////////////////////////////////////////
void execRandom3(Nd4jPointer *extraPointers, int opNum, Nd4jPointer stateHost,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
void *extraArguments) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY});
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execRandom(&lc, opNum, stateHost,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
extraArguments);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer initRandom(Nd4jPointer *extraPointers, long seed, long bufferSize, Nd4jPointer ptrToBuffer) {
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
// we don't synchronize at random initialization, it's safe to go unsync here
// cudaStreamSynchronize(*stream);
auto ptrDev = reinterpret_cast<unsigned long long *>(ptrToBuffer);
auto buffer = new sd::random::RandomBuffer(seed, bufferSize, reinterpret_cast<uint64_t *>(ptrHost), reinterpret_cast<uint64_t *>(ptrDev));
buffer->propagateToDevice(buffer, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed A");
// we generate sequence in the host memory
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// and copy it to gpu
cudaMemcpyAsync(ptrDev, ptrHost, bufferSize * 8, cudaMemcpyHostToDevice, *stream);
sd::DebugHelper::checkErrorCode(stream, "initRandom(...) failed B");
return buffer;
}
void destroyRandom(Nd4jPointer ptrBuffer) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrBuffer);
    // FIXME: it's a bad thing, but we can't know in advance which stream(s) were using this generator in practice
cudaDeviceSynchronize();
delete buffer;
}
void refreshBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
unsigned long long *ptrHost = reinterpret_cast<unsigned long long *>(extraPointers[0]);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
cudaStreamSynchronize(*stream);
uint64_t *ptrDev = buffer->getDeviceBuffer();
// update rng state
buffer->setSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
    // refresh buffer on host side
sd::random::Xoroshiro128 generator(buffer);
generator.refreshBuffer();
// copy back to gpu
cudaMemcpyAsync(ptrDev, ptrHost, buffer->getSize() * 8, cudaMemcpyHostToDevice, *stream);
}
void reSeedBuffer(Nd4jPointer *extraPointers, long seed, Nd4jPointer ptrRandom) {
sd::random::RandomBuffer *buffer = reinterpret_cast<sd::random::RandomBuffer *> (ptrRandom);
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
cudaStreamSynchronize(*stream);
// update rng state
buffer->reSeed(seed);
buffer->setOffset(0);
buffer->propagateToDevice(buffer, *stream);
}
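// Usage sketch (illustrative only; hypothetical caller, kept commented out): typical random-buffer
// lifecycle. extraPointers[0] is assumed to hold the host-side state buffer and extraPointers[1]
// the cudaStream_t in use.
/*
static void exampleRandomLifecycle(Nd4jPointer *extraPointers, Nd4jPointer deviceStateBuffer) {
    auto rng = initRandom(extraPointers, 119L, 100000L, deviceStateBuffer);
    refreshBuffer(extraPointers, 119L, rng);   // regenerate the sequence with the same seed
    reSeedBuffer(extraPointers, 120L, rng);    // switch to a new seed
    destroyRandom(rng);                        // synchronizes the device before deleting
}
*/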
/**
 * Return the length of a shape buffer
 * based on the pointer
 *
 * @param buffer the buffer pointer to check
 * @return the length of the shape buffer, in Nd4jLong elements
 */
int lengthForShapeBufferPointer(Nd4jPointer buffer) {
auto shapeBuffer = reinterpret_cast<Nd4jLong *>(buffer);
return shape::shapeInfoLength(shape::rank(shapeBuffer));
}
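// Worked example (assuming the standard 2 * rank + 4 shapeInfo layout): a rank-2 shape buffer
// holds 8 Nd4jLong values (rank, 2 shape entries, 2 stride entries, extra flags, ews, order),
// so lengthForShapeBufferPointer returns 8 for it. Hypothetical buffer, kept commented out:
/*
static void exampleShapeBufferLength() {
    Nd4jLong shapeInfo[] = {2, 3, 4, 4, 1, 0, 1, 99};   // 3x4 array, c-order, contiguous
    int len = lengthForShapeBufferPointer(reinterpret_cast<Nd4jPointer>(shapeInfo));   // == 8
    (void) len;
}
*/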
/**
 * Reinterpret a numeric address as a pointer
 *
 * @param address the address to convert
 * @return the pointer for the given address
 */
Nd4jPointer pointerForAddress(Nd4jLong address) {
return reinterpret_cast<Nd4jPointer >(address);
}
void tear(Nd4jPointer *extras,
OpaqueDataBuffer *dbX, Nd4jLong const* xShapeInfo, Nd4jLong const* dXShapeInfo,
Nd4jPointer *targets,
Nd4jLong const* zShapeInfo,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({}, {dbX});
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extras[1]);
dim3 launchDims(512, 512, 512);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, tearKernelGeneric,
(launchDims, stream, dbX->special(), dXShapeInfo, targets, zShapeInfo, tadShapeInfo, tadOffsets),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "tearFloat(...) failed");
InteropDataBuffer::registerSpecialUse({}, {dbX});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void prescanArrayRecursive(Nd4jPointer *extras, int *dZ, int *dX, int numElements, int level) {
auto stream = reinterpret_cast<cudaStream_t *>(extras[1]);
auto g_scanBlockSums = reinterpret_cast<int **>(extras[2]);
int blockSize = 512; // max size of the thread blocks
int numBlocks = sd::math::nd4j_max<int>(1, static_cast<int>(ceil(static_cast<float>(numElements) / (2.f * blockSize))));
int numThreads;
if (numBlocks > 1)
numThreads = blockSize;
else if (sd::isPowerOfTwo(numElements))
numThreads = numElements / 2;
else
numThreads = sd::floorPow2(numElements);
int numEltsPerBlock = numThreads * 2;
// if this is a non-power-of-2 array, the last block will be non-full
// compute the smallest power of 2 able to compute its scan.
int numEltsLastBlock =
numElements - (numBlocks-1) * numEltsPerBlock;
int numThreadsLastBlock = sd::math::nd4j_max<int>(1, numEltsLastBlock / 2);
int np2LastBlock = 0;
int sharedMemLastBlock = 0;
if (numEltsLastBlock != numEltsPerBlock) {
np2LastBlock = 1;
if(!isPowerOfTwo(numEltsLastBlock))
numThreadsLastBlock = floorPow2(numEltsLastBlock);
unsigned int extraSpace = (2 * numThreadsLastBlock) / NUM_BANKS;
sharedMemLastBlock = sizeof(int) * (2 * numThreadsLastBlock + extraSpace);
}
// padding space is used to avoid shared memory bank conflicts
int extraSpace = numEltsPerBlock / NUM_BANKS;
int sharedMemSize = sizeof(int) * (numEltsPerBlock + extraSpace);
// setup execution parameters
// if NP2, we process the last block separately
dim3 grid(max(1, numBlocks - np2LastBlock), 1, 1);
dim3 threads(numThreads, 1, 1);
dim3 gridOnes(1, 1, 1);
dim3 threadsOnes(numThreadsLastBlock, 1, 1);
if (sharedMemSize < 2048)
sharedMemSize = 2048;
if (sharedMemLastBlock < 2048)
sharedMemLastBlock = 2048;
// execute the scan
if (numBlocks > 1) {
sd::prescanLauncher<true, false>(grid, threads, sharedMemSize, stream, dZ, dX, g_scanBlockSums[level], numThreads * 2, 0, 0);
if (np2LastBlock) {
sd::prescanLauncher<true, true>(gridOnes, threadsOnes, sharedMemLastBlock, stream, dZ, dX, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
// After scanning all the sub-blocks, we are mostly done. But now we
// need to take all of the last values of the sub-blocks and scan those.
        // This will give us a new value that must be added to each block to
// get the final results.
// recursive (CPU) call
prescanArrayRecursive(extras, g_scanBlockSums[level], g_scanBlockSums[level], numBlocks, level+1);
sd::uniformAdd<<<grid, threads, 1024, *stream>>>(dZ, g_scanBlockSums[level], numElements - numEltsLastBlock, 0, 0);
if (np2LastBlock) {
sd::uniformAdd<<<1, numThreadsLastBlock, 1024, *stream>>>(dZ, g_scanBlockSums[level], numEltsLastBlock, numBlocks - 1, numElements - numEltsLastBlock);
}
} else if (isPowerOfTwo(numElements)) {
sd::prescanLauncher<false, false>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numThreads * 2, 0, 0);
} else {
sd::prescanLauncher<false, true>(grid, threads, sharedMemSize, stream, dZ, dX, 0, numElements, 0, 0);
}
sd::DebugHelper::checkErrorCode(stream, "prescanArray(...) failed");
}
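// Worked example of the launch math above (illustrative numbers): for numElements = 3000 and
// blockSize = 512, numBlocks = ceil(3000 / 1024) = 3, numThreads = 512 and numEltsPerBlock = 1024;
// the last block covers numEltsLastBlock = 3000 - 2 * 1024 = 952 elements, and since 952 is not a
// power of two, numThreadsLastBlock is set to floorPow2(952) = 512 and the NP2 variant of the
// scan kernel handles that block separately.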
////////////////////////////////////////////////////////////////////////
void execReduce3All(Nd4jPointer *extraPointers,
int opNum,
OpaqueDataBuffer *dbX, Nd4jLong const* hXShapeInfo, Nd4jLong const* dXShapeInfo,
void *extraParamsVals,
OpaqueDataBuffer *dbY, Nd4jLong const* hYShapeInfo, Nd4jLong const* dYShapeInfo,
OpaqueDataBuffer *dbZ, Nd4jLong const* hZShapeInfo, Nd4jLong const* dZShapeInfo,
OpaqueDataBuffer *dbDimension, Nd4jLong const* hDimensionShape, Nd4jLong const* dDimensionShape,
Nd4jLong const* xTadShapeInfo, Nd4jLong const* xOffsets,
Nd4jLong const* yTadShapeInfo, Nd4jLong const* yOffsets) {
try {
InteropDataBuffer::prepareSpecialUse({dbZ}, {dbX, dbY, dbDimension});
InteropDataBuffer::preparePrimaryUse({}, {dbDimension});
auto dimension = reinterpret_cast<int *>(dbDimension->primary());
int dimensionLength = static_cast<int>(shape::length(hDimensionShape));
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
NativeOpExecutioner::execReduce3All(&lc, opNum,
dbX->primary(), hXShapeInfo, dbX->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hXShapeInfo).specialAsT<Nd4jLong>(),
extraParamsVals,
dbY->primary(), hYShapeInfo, dbY->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hYShapeInfo).specialAsT<Nd4jLong>(),
dbZ->primary(), hZShapeInfo, dbZ->special(), ConstantShapeHelper::getInstance()->bufferForShapeInfo(hZShapeInfo).specialAsT<Nd4jLong>(),
reinterpret_cast<int *>(dbDimension->special()), dimensionLength,
xTadShapeInfo, xOffsets, yTadShapeInfo, yOffsets);
InteropDataBuffer::registerSpecialUse({dbZ}, {dbX, dbY});
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sort(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
bool descending) {
try {
cudaStream_t *stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_SINGLE_SELECTOR(xType, bitonicSortStepGeneric,
(launchDims, stream, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_SINGLE_SELECTOR(xType, bitonicArbitraryStepGeneric,
(launchDims, stream, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
sd::DebugHelper::checkErrorCode(stream, "sort(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
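// Worked example of the dispatch above (illustrative numbers): xLength = 4096 is a power of two
// ((4096 & 4095) == 0), so the plain bitonic network runs with numThreads = 512 and
// numBlocks = 4096 / 512 = 8; xLength = 5000 is not, so the arbitrary-step variant runs instead,
// with max grown to 8192 (the smallest power of two >= 5000) and then doubled once more, i.e.
// windows of size 2 .. 8192 are processed.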
void sortByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByKey: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto xLength = shape::length(xShapeInfo);
auto yLength = shape::length(yShapeInfo);
auto xEWS = shape::elementWiseStride(xShapeInfo);
        // note: sorting by value intentionally swaps the key/value roles, so the shape infos are crossed here
        auto xType = sd::ArrayOptions::dataType(yShapeInfo);
        auto yType = sd::ArrayOptions::dataType(xShapeInfo);
if (shape::isEmpty(xShapeInfo) || shape::isEmpty(yShapeInfo))
return;
if (xLength != yLength)
throw std::runtime_error("sortByValue: keys and values must have the same size");
// check if xLength is a power of 2, and use bitonic sort, if that's the case
if ((xLength != 0) && ((xLength & (xLength - 1)) == 0) && (xLength <= 1024 * 1024 * 10)) {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
dim3 launchDims(numBlocks, numThreads, 32768);
for (int k = 2; k <= xLength; k = 2 * k) {
for (int j = k >> 1; j > 0; j = j >> 1) {
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicSortStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, j, k, xLength, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
}
}
} else {
int numThreads = sd::math::nd4j_min<int>(512, xLength);
int numBlocks = xLength / numThreads;
if (xLength % numThreads > 0 || numBlocks == 0)
numBlocks++;
numBlocks = sd::math::nd4j_min<int>(512, numBlocks);
dim3 launchDims(numBlocks, numThreads, 32768);
int max = 2, dg = 0;
while (max < xLength) {
max <<= 1;
dg++;
}
max <<= 1;
for (int window = 2; window < max; window <<= 1) {
int n = window;
int rev = 0;
do {
int half = n >> 1;
BUILD_DOUBLE_SELECTOR(xType, yType, bitonicArbitraryStepGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, n, xLength, rev, descending),
LIBND4J_TYPES, LIBND4J_TYPES);
n >>= 1;
rev = 1;
} while (n > 1);
}
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
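// sortTadByKey / sortTadByValue sort each tensor-along-dimension (TAD) independently:
// one block per TAD, using the odd-even transposition sort kernel (oesTadGenericKey).
// The ByValue variant swaps key/value roles so the values act as the sort key.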
void sortTadByKey(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
auto yType = sd::ArrayOptions::dataType(yShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dX, dXShapeInfo, dy, dyShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadKey(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTadByValue(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
void *y, Nd4jLong const* yShapeInfo,
void *dy, Nd4jLong const* dyShapeInfo,
int *dimension,
int dimensionLength,
bool descending) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 256, 2048);
auto xType = sd::ArrayOptions::dataType(yShapeInfo);
auto yType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_DOUBLE_SELECTOR(xType, yType, oesTadGenericKey,
(launchDims, stream, dy, dyShapeInfo, dX, dXShapeInfo, nullptr, dimensionLength, tadPack.platformShapeInfo(), tadPack.platformOffsets(), descending),
LIBND4J_TYPES, LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTadValue(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortTad(Nd4jPointer *extraPointers,
void *x, Nd4jLong const* xShapeInfo,
void *dX, Nd4jLong const* dXShapeInfo,
int *dimension,
int dimensionLength,
Nd4jLong const* tadShapeInfo,
Nd4jLong const* tadOffsets,
bool descending) {
try {
// sorts each TAD independently with an odd-even transposition sort
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto context = extraPointers[0] == 0 ? LaunchContext::defaultContext()
: reinterpret_cast<LaunchContext *>(extraPointers[0]);
auto tadPack = sd::ConstantTadHelper::getInstance()->tadForDimensions(xShapeInfo, dimension, dimensionLength);
dim3 launchDims((int) tadPack.numberOfTads(), 512, 33768);
auto xType = sd::ArrayOptions::dataType(xShapeInfo);
BUILD_SINGLE_SELECTOR(xType, oesTadGeneric,
(launchDims, stream, dX, dXShapeInfo, nullptr, dimensionLength, tadShapeInfo, tadOffsets, descending),
LIBND4J_TYPES);
sd::DebugHelper::checkErrorCode(stream, "sortTad(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void sortCooIndices(Nd4jPointer *extraPointers, Nd4jLong *indices, void *values, Nd4jLong length, int rank) {
throw std::runtime_error("sortCooIndices:: Not implemented yet");
}
Nd4jLong* mmapFile(Nd4jPointer *extraPointers, const char *fileName, Nd4jLong length) {
return nullptr;
}
void munmapFile(Nd4jPointer *extraPointers, Nd4jLong* ptrMap, Nd4jLong length) {
}
sd::graph::ResultWrapper* executeFlatGraph(Nd4jPointer *extraPointers, Nd4jPointer flatBufferPointer) {
try {
return sd::graph::GraphExecutioner::executeFlatBuffer(flatBufferPointer);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getResultWrapperSize(sd::graph::ResultWrapper* ptr) {
return ptr->size();
}
Nd4jPointer getResultWrapperPointer(sd::graph::ResultWrapper* ptr) {
return ptr->pointer();
}
const char* getAllCustomOps() {
return sd::ops::OpRegistrator::getInstance()->getAllCustomOperations();
}
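// builds a temporary VariableSpace/Context, wraps the provided buffers as NDArrays,
// and asks the op to compute its output shapes for the given input shapes and arguments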
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
sd::graph::VariableSpace varSpace;
Context block(2, &varSpace);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numBArgs; e++)
block.getBArguments()->push_back(bArgs[e]);
for (int e = 0; e < numDArgs; e++)
block.getDArguments()->push_back((sd::DataType) dArgs[e]);
for (int e = 0; e < numInputShapes; e++) {
auto shape_ = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
// we shouldn't pass a buffer if this is an empty array
void *buffer_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD_ = sd::ArrayOptions::arrayType(shape_) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputShapes];
auto array = new sd::NDArray(buffer_, bufferD_, shape_);
// block should contain references to proper variable
varSpace.putVariable(1, e, array);
block.pickInput(1, e);
inShapes.push_back(shape_);
}
auto shapeList = op->calculateOutputShape(&inShapes, block);
if (varSpace.launchContext()->getWorkspace() != nullptr)
shapeList->detach();
return shapeList;
}
sd::ShapeList* calculateOutputShapes2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool *bArgs, int numBArgs, int *dArgs, int numDArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputBuffers, inputShapes, numInputShapes, tArgs, numTArgs,
iArgs, numIArgs, bArgs, numBArgs, dArgs, numDArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
sd::ShapeList* _calculateOutputShapes(Nd4jPointer* extraPointers, sd::ops::DeclarableOp* op, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
Context block(1);
sd::ShapeList inShapes;
for (int e = 0; e < numIArgs; e++)
block.getIArguments()->push_back(iArgs[e]);
for (int e = 0; e < numTArgs; e++)
block.getTArguments()->push_back(tArgs[e]);
for (int e = 0; e < numInputShapes; e++)
inShapes.push_back(reinterpret_cast<Nd4jLong *>(inputShapes[e]));
auto shapeList = op->calculateOutputShape(&inShapes, block);
return shapeList;
}
sd::ShapeList* calculateOutputShapes(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputShapes, int numInputShapes, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return _calculateOutputShapes(extraPointers, op, inputShapes, numInputShapes, tArgs, numTArgs, iArgs, numIArgs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getShapeListSize(sd::ShapeList* list) {
return list->size();
}
Nd4jLong const* getShape(sd::ShapeList* list, Nd4jLong i) {
return list->at(i);
}
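// realExec wraps the raw host/device buffers into temporary NDArrays, copies the t/i/b arguments,
// executes the DeclarableOp, re-orders any output whose ordering differs from the requested shape,
// and finally releases the temporary arrays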
static FORCEINLINE Nd4jStatus realExec(sd::ops::DeclarableOp* op, Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
if (op == nullptr)
nd4j_printf("Can't find requested operation: [%lld]\n", hash);
// we're using the same fake nodeId everywhere here
std::vector<sd::NDArray*> inputs(numInputs);
std::vector<sd::NDArray*> outputs(numOutputs);
std::vector<double> ttArgs(numTArgs);
std::vector<bool> bbArgs(numBArgs);
std::vector<Nd4jLong> iiArgs(numIArgs);
// filling block now with inputs
for (int e = 0; e < numInputs; e++) {
auto shape = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[e + numInputs];
inputs[e] = new sd::NDArray(buffer, bufferD, shape);
}
// if not inplace - transferring output arrays
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
// we want to keep original output shape intact
auto shape = shape::copyShape(reinterpret_cast<Nd4jLong *>(outputShapes[e]));
void *buffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e];
void *bufferD = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : outputBuffers[e + numOutputs];
// FIXME: revisit this.
bool canNullify = true;
for (int i = 0; i < numInputs; i++) {
void *ibuffer = sd::ArrayOptions::arrayType(shape) == ArrayType::EMPTY ? nullptr : inputBuffers[i];
if (ibuffer == buffer) {
canNullify = false;
break;
}
}
if (canNullify && buffer != nullptr)
memset((uint8_t *) buffer, '\0', shape::length(shape) * DataTypeUtils::sizeOfElement(ArrayOptions::dataType(shape)));
auto array = new sd::NDArray(buffer, bufferD, shape);
outputs[e] = array;
}
for (int e = 0; e < numIArgs; e++)
iiArgs[e] = iArgs[e];
for (int e = 0; e < numTArgs; e++)
ttArgs[e] = tArgs[e];
for (int e = 0; e < numBArgs; e++)
bbArgs[e] = bArgs[e];
// hypothetically at this point we have everything filled
auto dZ = op->execute(inputs, outputs, ttArgs, iiArgs, bbArgs, std::vector<sd::DataType>(), isInplace);
//auto dZ = op->execute(inputs, ttArgs, iiArgs, isInplace);
if (!isInplace)
for (int e = 0; e < numOutputs; e++) {
//shape::printShapeInfoLinear("JVM output shape", (int *) outputShapes[e]);
//shape::printShapeInfoLinear("C++ output shape", (int *) outputs[e]->shapeInfo());
//outputs[e]->printIndexedBuffer("C++ raw output");
//outputs[e]->printBuffer("C++ indexed output");
if (outputs[e]->ordering() != shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])))
outputs[e]->streamline(shape::order(reinterpret_cast<Nd4jLong *>(outputShapes[e])));
}
for (auto v: inputs)
delete v;
for (auto v: outputs)
delete v;
return Status::OK();
}
int execCustomOp(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer* inputBuffers, Nd4jPointer* inputShapes, int numInputs, Nd4jPointer* outputBuffers, Nd4jPointer* outputShapes, int numOutputs, double* tArgs, int numTArgs, Nd4jLong *iArgs, int numIArgs, bool* bArgs, int numBArgs, bool isInplace) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
return realExec(op, extraPointers, hash, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs, tArgs, numTArgs, iArgs, numIArgs, bArgs, numBArgs, isInplace);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int execCustomOp2(Nd4jPointer* extraPointers, Nd4jLong hash, Nd4jPointer opContext) {
try {
auto op = sd::ops::OpRegistrator::getInstance()->getOperation(hash);
auto context = reinterpret_cast<Context *>(opContext);
auto result = op->execute(context);
auto res = cudaStreamSynchronize(*context->launchContext()->getCudaStream());
if (res != 0)
throw sd::cuda_exception::build("customOp execution failed", res);
for (auto v:context->fastpath_in()) {
if (!v->isEmpty())
v->syncToDevice();
}
for (auto v:context->fastpath_out()) {
if (!v->isEmpty())
v->syncToDevice();
}
return result;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
int registerGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer flatBufferPointer) {
try {
auto graph = sd::graph::GraphExecutioner::importFromFlatPointer(flatBufferPointer);
sd::graph::GraphHolder::getInstance()->registerGraph(graphId, graph);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
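// clones the stored graph's VariableSpace, injects the provided inputs at the given indices,
// executes the graph, and returns clones of the fetched output variables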
static VariablesSet* executeStoredGraphT(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
auto graph = sd::graph::GraphHolder::getInstance()->pullGraph(graphId);
auto varSpace = graph->getVariableSpace()->clone();
std::vector<sd::NDArray*> handles;
for (int e = 0; e < numInputs; e++) {
auto idx = inputIndices[e];
// we'll delete this array later, together with cloned VariableSpace
auto array = new sd::NDArray(inputBuffers[e], reinterpret_cast<Nd4jLong *>(inputShapes[e]));
handles.emplace_back(array);
if (varSpace->hasVariable(idx)) {
auto var = varSpace->getVariable(idx);
if (var->hasNDArray())
delete var->getNDArray();
var->setNDArray(array);
} else
varSpace->putVariable(idx, array);
}
auto dZ = sd::graph::GraphExecutioner::execute(graph, varSpace);
auto varSet = new sd::graph::VariablesSet(dZ);
if (dZ == ND4J_STATUS_OK) {
// pull back results, and provide them
auto outputs = graph->fetchOutputs();
for (int e = 0; e < outputs->size(); e++) {
// we're only getting variable ID/Index from the original graph; values will be taken from the cloned workspace
std::pair<int, int> varId(outputs->at(e)->id(), outputs->at(e)->index());
auto var = varSpace->getVariable(varId);
varSet->push_back(var->clone());
}
delete outputs;
}
delete varSpace;
return varSet;
}
VariablesSet* executeStoredGraph(Nd4jPointer *extraPointers, Nd4jLong graphId, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int* inputIndices, int numInputs) {
try {
return executeStoredGraphT(extraPointers, graphId, inputBuffers, inputShapes, inputIndices, numInputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getVariablesSetSize(sd::graph::VariablesSet* set) {
return set->size();
}
Nd4jStatus getVariablesSetStatus(sd::graph::VariablesSet* set) {
return set->status();
}
sd::graph::Variable* getVariable(sd::graph::VariablesSet* set, Nd4jLong i) {
return set->at(i);
}
int getVariableId(sd::graph::Variable* variable) {
return variable->id();
}
int getVariableIndex(sd::graph::Variable* variable) {
return variable->index();
}
const char* getVariableName(sd::graph::Variable* variable) {
return variable->getName()->c_str();
}
Nd4jLong const* getVariableShape(sd::graph::Variable* variable) {
return variable->getNDArray()->shapeInfo();
}
void* getVariableBuffer(sd::graph::Variable* variable) {
return variable->getNDArray()->buffer();
}
int unregisterGraph(Nd4jPointer *extraPointers, Nd4jLong graphId) {
try {
sd::graph::GraphHolder::getInstance()->dropGraphAny(graphId);
return ND4J_STATUS_OK;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deletePointerArray(Nd4jPointer pointer) {
Nd4jPointer *ptr = reinterpret_cast<Nd4jPointer *>(pointer);
delete[] ptr;
}
void deleteCharArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<char *>(pointer);
delete[] ptr;
}
void deleteIntArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<int *>(pointer);
delete[] ptr;
}
void deleteLongArray(Nd4jPointer pointer) {
auto ptr = reinterpret_cast<Nd4jLong *>(pointer);
delete[] ptr;
}
void deleteVariablesSet(sd::graph::VariablesSet* pointer) {
delete pointer;
}
void deleteShapeList(Nd4jPointer shapeList) {
sd::ShapeList* list = reinterpret_cast<sd::ShapeList*>(shapeList);
//list->destroy();
delete list;
}
const char* getAllOperations() {
return sd::OpTracker::getInstance()->exportOperations();
}
Nd4jPointer getGraphState(Nd4jLong id) {
return (Nd4jPointer) new sd::graph::GraphState(id);
}
void deleteGraphState(Nd4jPointer state) {
auto stateP = reinterpret_cast<sd::graph::GraphState*>(state);
delete stateP;
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, sd::graph::GraphState *state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
/**
* That's basically exec, with VariableSpace provided in GraphState:
* depending on operation (i.e. while of if), different logic executors could be used
*/
auto graph = state->graph();
auto varSpace = state->variableSpace();
// Node is dynamically created, and has nothing beyond it: only inputs and outputs
// this node has an id of 0, and its inputs are mapped below from the provided buffers
Node node(OpType_LOGIC, opHash, 0);
// mapping inputs
for (int e = 0; e < numInputs; e++) {
auto buffer = inputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(inputShapes[e]);
auto array = new sd::NDArray(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace
varSpace->putVariable(0, e, array);
node.pickInput(0, e);
}
// mapping scopes
for (int e = 0; e < numScopes; e++) {
// we should check scope existence in GraphState/Graph
int scopeId = (int) scopes[e];
if (!state->hasScope(scopeId)) {
// nd4j_printf("execCustomOpWithScope: referenced scope [%i] doesn't exist\n", scopeId);
return Status::THROW();
}
node.pickInput(scopeId, 0);
}
auto dZ = LogicExecutor::processNode(graph, &node);
if (dZ != Status::OK())
return dZ;
// mapping outputs
for (int e = 0; e < numOutputs; e++) {
auto buffer = outputBuffers[e];
auto shapeInfo = reinterpret_cast<Nd4jLong *>(outputShapes[e]);
NDArray array(buffer, shapeInfo, varSpace->launchContext());
// now we just put array to VarSpace to the same ID
//varSpace->putVariable(0, e, array);
auto t = varSpace->getVariable(0, e)->getNDArray();
array.assign(t);
}
// removing input variables
for (int e = 0; e < numInputs; e++) {
varSpace->dropVariable(0, e);
}
// at this point the Graph and Node for the current op have been processed
return Status::OK();
}
Nd4jStatus execCustomOpWithScope(Nd4jPointer *extraPointers, Nd4jPointer state, Nd4jLong opHash, Nd4jLong *scopes, int numScopes, Nd4jPointer *inputBuffers, Nd4jPointer *inputShapes, int numInputs, Nd4jPointer *outputBuffers, Nd4jPointer *outputShapes, int numOutputs) {
try {
return execCustomOpWithScope(extraPointers, reinterpret_cast<sd::graph::GraphState *>(state), opHash, scopes,
numScopes, inputBuffers, inputShapes, numInputs, outputBuffers, outputShapes,
numOutputs);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return 1;
}
}
void deleteResultWrapper(Nd4jPointer ptr) {
// plain delete; nothing else to clean up here
auto p = reinterpret_cast<sd::graph::ResultWrapper *>(ptr);
delete p;
}
int estimateThreshold(Nd4jPointer *extraPointers, Nd4jPointer dX, Nd4jLong const* dXShapeInfo, int N, float threshold) {
throw std::runtime_error("estimateThreshold: Not implemented yet");
}
/*
* TypeDef:
* void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, long N, int dstType, Nd4jPointer dZ);
*/
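// dispatches on (srcType, dstType) pairs; branches that are commented out below (e.g. float8 and
// threshold conversions) are currently not performed and simply fall through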
void convertTypes(Nd4jPointer *extras, int srcType, Nd4jPointer dX, Nd4jLong N, int dstType, Nd4jPointer dZ) {
try {
auto dx = reinterpret_cast<void *>(dX);
auto dz = reinterpret_cast<void *>(dZ);
if (srcType == ND4J_FLOAT8) {
if (dstType == ND4J_FLOAT8) {
// convertKernel<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint8>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::int16>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
//sd::TypeCast::convertGenericCuda<sd::float8, sd::uint16>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
//sd::TypeCast::convertGenericCuda<sd::float8, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::TypeCast::convertGenericCuda<sd::float8, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<sd::int8, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
//convertKernel<sd::int8, sd::int8>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: eventually we might want to add it
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_UINT8) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<uint8_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<uint8_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<uint8_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<uint8_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<uint8_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<uint8_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: still might want to add
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<uint8_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<uint8_t, double>(extras, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float16, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float16, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float16, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float16, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float16, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float16, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO: .... ^^^
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<float16, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float16, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float16>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_INT16) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<int16_t, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<int16_t, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<int16_t, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<int16_t, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<int16_t, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<int16_t, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
// TODO...
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<int16_t, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<int16_t, double>(extras, dx, N, dz);
} else {
printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_FLOAT24) {
} else if (srcType == ND4J_FLOAT32) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<float, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<float, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<float, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<float, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<float, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<float, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_DOUBLE) {
sd::TypeCast::convertGenericCuda<float, double>(extras, dx, N, dz);
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<float>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_DOUBLE) {
if (dstType == ND4J_FLOAT8) {
//sd::TypeCast::convertGenericCuda<double, sd::float8>(extras, dx, N, dz);
} else if (dstType == ND4J_INT8) {
sd::TypeCast::convertGenericCuda<double, int8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT8) {
sd::TypeCast::convertGenericCuda<double, uint8_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT16) {
sd::TypeCast::convertGenericCuda<double, float16>(extras, dx, N, dz);
} else if (dstType == ND4J_INT16) {
sd::TypeCast::convertGenericCuda<double, int16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_UINT16) {
sd::TypeCast::convertGenericCuda<double, uint16_t>(extras, dx, N, dz);
} else if (dstType == ND4J_FLOAT24) {
} else if (dstType == ND4J_FLOAT32) {
sd::TypeCast::convertGenericCuda<double, float>(extras, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//
} else if (dstType == ND4J_THRESHOLD) {
//sd::convertToThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else if (srcType == ND4J_THRESHOLD) {
if (dstType == ND4J_FLOAT16) {
//sd::convertFromThreshold<float16>(nullptr, dx, N, dz);
} else if (dstType == ND4J_FLOAT32) {
//sd::convertFromThreshold<float>(nullptr, dx, N, dz);
} else if (dstType == ND4J_DOUBLE) {
//sd::convertFromThreshold<double>(nullptr, dx, N, dz);
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} else {
nd4j_printf("Unsupported types conversion: [%i] -> [%i]\n", srcType, dstType);
}
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
Nd4jPointer createUtf8String(Nd4jPointer *extraPointers, const char *string, int length) {
auto u = new sd::utf8string(string, length);
return reinterpret_cast<Nd4jPointer>(u);
}
Nd4jLong getUtf8StringLength(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_length;
}
char* getUtf8StringBuffer(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
return reinterpret_cast<sd::utf8string*>(ptr)->_buffer;
}
void deleteUtf8String(Nd4jPointer *extraPointers, Nd4jPointer ptr) {
delete(reinterpret_cast<sd::utf8string*>(ptr));
}
///////////////////////////////////////////////////////////////////
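// each sub-array index is "owned" by one block (blockIdx.x == index % gridDim.x); the owning block
// applies the element-wise update selected by opCode (add, sub, mul, div, reverse-sub, reverse-div
// or copy) to its slice of x using the matching slice of y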
template<typename T, typename I>
__global__ static void scatterUpdateCuda(const int opCode, const int numOfSubArrs,
void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong *xOffsets,
void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets,
const void* vindexes) {
__shared__ T *x, *y;
__shared__ Nd4jLong arrLenX, arrLenY;
auto indexes = reinterpret_cast<const I*>(vindexes);
for (int e = 0; e < numOfSubArrs; e++ ) {
const auto xIndex = indexes[e];
const bool isOwner = xIndex < gridDim.x ? blockIdx.x == xIndex : blockIdx.x == xIndex % gridDim.x;
if (!isOwner)
continue;
if (threadIdx.x == 0) {
x = reinterpret_cast<T*>(vx) + xOffsets[xIndex];
y = reinterpret_cast<T*>(vy) + yOffsets[e];
arrLenX = shape::length(xShapeInfo);
arrLenY = shape::length(yShapeInfo);
}
__syncthreads();
if (arrLenX != arrLenY)
return;
for (Nd4jLong i = threadIdx.x; i < arrLenX; i += blockDim.x) {
const auto xOffset = shape::getIndexOffset(i, xShapeInfo);
const auto yOffset = shape::getIndexOffset(i, yShapeInfo);
switch (opCode) {
case 0:
x[xOffset] += y[yOffset];
break;
case 1:
x[xOffset] -= y[yOffset];
break;
case 2:
x[xOffset] *= y[yOffset];
break;
case 3:
x[xOffset] /= y[yOffset];
break;
case 4:
x[xOffset] = y[yOffset] - x[xOffset];
break;
case 5:
x[xOffset] = y[yOffset] / x[xOffset];
break;
case 6:
x[xOffset] = y[yOffset];
break;
default:
continue;
}
}
__syncthreads();
}
}
template<typename T, typename I>
__host__ static void scatterUpdateCudaLauncher(const cudaStream_t* stream, const int opCode, const int numOfSubArrs, void* vx, const Nd4jLong* xShapeInfo, const Nd4jLong* xOffsets, void* vy, const Nd4jLong *yShapeInfo, const Nd4jLong *yOffsets, const void* indexes) {
scatterUpdateCuda<T, I><<<512, 256, MAX_NUM_THREADS, *stream>>>(opCode, numOfSubArrs, vx, xShapeInfo, xOffsets, vy, yShapeInfo, yOffsets, indexes);
}
//////////////////////////////////////////////////////////////////////////
void scatterUpdate(Nd4jPointer *extraPointers, int opCode, int numOfSubArrs,
void* hX, Nd4jLong const* hXShapeInfo, Nd4jLong const* hXOffsets,
void* dX, Nd4jLong const* dXShapeInfo, Nd4jLong const* dXOffsets,
void* hY, Nd4jLong const* hYShapeInfo, Nd4jLong const* hYOffsets,
void* dY, Nd4jLong const* dYShapeInfo, Nd4jLong const* dYOffsets,
void* hIindexes, Nd4jLong const* hIndicesShapeInfo, void* dIindexes, Nd4jLong const* dIndicesShapeInfo) {
try {
auto stream = reinterpret_cast<cudaStream_t *>(extraPointers[1]);
auto type = ArrayOptions::dataType(hXShapeInfo);
auto iType = ArrayOptions::dataType(hIndicesShapeInfo);
BUILD_DOUBLE_SELECTOR(type, iType, scatterUpdateCudaLauncher,
(stream, opCode, numOfSubArrs, dX, dXShapeInfo, dXOffsets, dY, dYShapeInfo, dYOffsets, dIindexes),
LIBND4J_TYPES, INDEXING_TYPES);
sd::DebugHelper::checkErrorCode(stream, "scatterUpdate(...) failed");
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
void inspectArray(Nd4jPointer *extraPointers, Nd4jPointer buffer, Nd4jLong *shapeInfo, Nd4jPointer specialBuffer, Nd4jLong *specialShapeInfo, Nd4jPointer debugInfo) {
try {
LaunchContext lc(extraPointers[1], extraPointers[4], extraPointers[5], extraPointers[3]);
auto p = reinterpret_cast<sd::DebugInfo *>(debugInfo);
NDArray array(buffer, specialBuffer, shapeInfo, &lc);
sd::DebugHelper::retrieveDebugStatistics(p, &array);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
__global__ void tryPointerKernel(void* p, int len) {
auto buf = reinterpret_cast<int8_t*>(p);
auto tid = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ int b;
if (threadIdx.x == 0)
b = 0;
__syncthreads();
if (tid < len)
atomicAdd(&b, buf[tid]);
__syncthreads();
if (threadIdx.x ==0 && blockIdx.x == 0)
printf("Pointer check complete: %i\n", b);
}
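// tryPointer launches the probe kernel above on a temporary stream to verify that a device
// pointer is actually dereferenceable; any CUDA error is converted into a cuda_exception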
void tryPointer(Nd4jPointer extra, Nd4jPointer p, int len) {
try {
cudaStream_t stream;
cudaStreamCreate(&stream);
tryPointerKernel<<<256, 512, len + 64, stream>>>(p, len);
auto e = cudaStreamSynchronize(stream);
if (e != 0)
throw sd::cuda_exception::build("tryPointer failed", e);
cudaStreamDestroy(stream);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
int dataTypeFromNpyHeader(void *header) {
return (int) cnpy::dataTypeFromHeader(reinterpret_cast<char *>(header));
}
sd::ConstantDataBuffer* shapeBuffer(int rank, Nd4jLong *shape, Nd4jLong *strides, sd::DataType dtype, char order, Nd4jLong ews, bool empty) {
try {
auto buffer = new ConstantDataBuffer();
*buffer = sd::ConstantShapeHelper::getInstance()->bufferForShapeInfo(
ShapeDescriptor(dtype, order, shape, strides, rank, ews, empty));
return buffer;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
void deleteShapeBuffer(sd::ConstantDataBuffer* ptr) {
delete ptr;
}
void deleteTadPack(sd::TadPack* ptr) {
delete ptr;
}
bool isBlasVersionMatches(int major, int minor, int build) {
auto result = major == Environment::getInstance()->_blasMajorVersion && minor == Environment::getInstance()->_blasMinorVersion && build == Environment::getInstance()->_blasPatchVersion;
if (!result) {
nd4j_printf("CUDA/cuBLAS version mismatch. Expected: %i.%i.%i but got %i.%i.%i instead\n", Environment::getInstance()->_blasMajorVersion, Environment::getInstance()->_blasMinorVersion, Environment::getInstance()->_blasPatchVersion, major, minor, build);
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(152);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage("CUDA/cuBLAS version mismatch");
}
return result;
}
sd::ConstantDataBuffer* constantBufferLong(sd::DataType dtype, Nd4jLong const* data, int length) {
return sd::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBufferDouble(sd::DataType dtype, double *data, int length) {
return sd::ConstantHelper::getInstance()->constantBuffer(ConstantDescriptor(data, length), dtype);
}
sd::ConstantDataBuffer* constantBuffer(sd::DataType dtype, sd::ConstantDescriptor *descriptor) {
return sd::ConstantHelper::getInstance()->constantBuffer(*descriptor, dtype);
}
Nd4jPointer getConstantDataBufferPrimary(sd::ConstantDataBuffer* dbf) {
return dbf->primary();
}
Nd4jPointer getConstantDataBufferSpecial(sd::ConstantDataBuffer* dbf) {
return dbf->special();
}
Nd4jLong getConstantDataBufferLength(sd::ConstantDataBuffer* dbf) {
return dbf->length();
}
Nd4jLong getConstantDataBufferSizeOf(sd::ConstantDataBuffer* dbf) {
return dbf->sizeOf();
}
sd::graph::Context* createGraphContext(int nodeId) {
return new sd::graph::Context(nodeId);
}
sd::graph::RandomGenerator* getGraphContextRandomGenerator(sd::graph::Context* ptr) {
return &ptr->randomGenerator();
}
void markGraphContextInplace(sd::graph::Context* ptr, bool reallyInplace) {
ptr->markInplace(reallyInplace);
}
void setGraphContextCudaContext(sd::graph::Context* ptr, void *stream, void *reductionPointer, void *allocationPointer) {
ptr->setCudaContext(stream, reductionPointer, allocationPointer);
}
void setGraphContextInputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextOutputArray(sd::graph::Context* ptr, int index, void *buffer, void *shapeInfo, void *specialBuffer, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialBuffer, specialShapeInfo);
}
void setGraphContextInputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setInputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextOutputBuffer(OpaqueContext* ptr, int index, OpaqueDataBuffer *buffer, void *shapeInfo, void *specialShapeInfo) {
ptr->setOutputArray(index, buffer, shapeInfo, specialShapeInfo);
}
void setGraphContextTArguments(sd::graph::Context* ptr, double *arguments, int numberOfArguments) {
ptr->setTArguments(arguments, numberOfArguments);
}
void setGraphContextIArguments(sd::graph::Context* ptr, Nd4jLong *arguments, int numberOfArguments) {
ptr->setIArguments(arguments, numberOfArguments);
}
void setGraphContextBArguments(sd::graph::Context* ptr, bool *arguments, int numberOfArguments) {
ptr->setBArguments(arguments, numberOfArguments);
}
void setGraphContextDArguments(OpaqueContext* ptr, int *arguments, int numberOfArguments) {
std::vector<sd::DataType> dtypes(numberOfArguments);
for (int e = 0; e < numberOfArguments; e++)
dtypes[e] = (sd::DataType) arguments[e];
ptr->setDArguments(dtypes);
}
void deleteGraphContext(sd::graph::Context* ptr) {
delete ptr;
}
sd::graph::RandomGenerator* createRandomGenerator(Nd4jLong rootSeed, Nd4jLong nodeSeed) {
try {
return new sd::graph::RandomGenerator(rootSeed, nodeSeed);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getRandomGeneratorRootState(sd::graph::RandomGenerator* ptr) {
return ptr->rootState();
}
Nd4jLong getRandomGeneratorNodeState(sd::graph::RandomGenerator* ptr) {
return ptr->nodeState();
}
void setRandomGeneratorStates(sd::graph::RandomGenerator* ptr, Nd4jLong rootSeed, Nd4jLong nodeSeed) {
ptr->setStates(rootSeed, nodeSeed);
}
int getRandomGeneratorRelativeInt(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeInt(index);
}
Nd4jLong getRandomGeneratorRelativeLong(sd::graph::RandomGenerator* ptr, Nd4jLong index) {
return ptr->relativeLong(index);
}
void deleteRandomGenerator(sd::graph::RandomGenerator* ptr) {
delete ptr;
}
Nd4jPointer shapeBufferForNumpy(Nd4jPointer npyArray) {
try {
cnpy::NpyArray arr = cnpy::loadNpyFromPointer(reinterpret_cast<char *>(npyArray));
unsigned int shapeSize = arr.shape.size();
std::vector<Nd4jLong> shape(shapeSize);
bool _empty = false;
for (unsigned int i = 0; i < shapeSize; i++) {
shape[i] = arr.shape[i];
if (arr.shape[i] == 0)
_empty = true;
}
auto dtype = cnpy::dataTypeFromHeader(reinterpret_cast<char *>(npyArray));
Nd4jLong *shapeBuffer;
if (shape.size() == 1 && shape[0] == 0) {
// scalar case
shapeBuffer = sd::ShapeBuilders::createScalarShapeInfo(dtype);
} else if (_empty) {
if (shapeSize > 0)
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
else
shapeBuffer = sd::ShapeBuilders::emptyShapeInfo(dtype);
} else {
shapeBuffer = sd::ShapeBuilders::createShapeInfo(dtype, arr.fortranOrder ? 'f' : 'c', shape);
}
return (Nd4jPointer)(sd::ConstantShapeHelper::getInstance()->createFromExisting(shapeBuffer, true)); // TO DO: this can lead to unpleasant crash sometimes
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runLightBenchmarkSuit(bool printOut) {
try {
sd::LightBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
const char* runFullBenchmarkSuit(bool printOut) {
try {
sd::FullBenchmarkSuit suit;
auto result = suit.runSuit();
if (printOut)
nd4j_printf("%s\n", result.data());
auto chars = new char[result.length() + 1];
std::memcpy(chars, result.data(), result.length());
chars[result.length()] = (char) 0x0;
return chars;
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jLong getCachedMemory(int deviceId) {
return sd::ConstantHelper::getInstance()->getCachedAmount(deviceId);
}
sd::LaunchContext* defaultLaunchContext() {
return LaunchContext::defaultContext();
}
Nd4jPointer lcScalarPointer(OpaqueLaunchContext* lc) {
return lc->getScalarPointer();
}
Nd4jPointer lcReductionPointer(OpaqueLaunchContext* lc) {
return lc->getReductionPointer();
}
Nd4jPointer lcAllocationPointer(OpaqueLaunchContext* lc) {
return lc->getAllocationPointer();
}
Nd4jPointer lcExecutionStream(OpaqueLaunchContext* lc) {
return lc->getCudaStream();
}
Nd4jPointer lcCopyStream(OpaqueLaunchContext* lc) {
return lc->getCudaSpecialStream();
}
Nd4jPointer lcBlasHandle(OpaqueLaunchContext* lc) {
return lc->getCublasHandle();
}
Nd4jPointer lcSolverHandle(OpaqueLaunchContext* lc) {
return lc->getCusolverHandle();
}
int lastErrorCode() {
return sd::LaunchContext::defaultContext()->errorReference()->errorCode();
}
const char* lastErrorMessage() {
return sd::LaunchContext::defaultContext()->errorReference()->errorMessage();
}
void ctxShapeFunctionOverride(OpaqueContext* ptr, bool reallyOverride) {
ptr->setShapeFunctionOverride(reallyOverride);
}
void ctxPurge(OpaqueContext* ptr) {
ptr->clearFastPath();
}
int binaryLevel() {
return 0;
}
int optimalLevel() {
return 0;
}
bool isMinimalRequirementsMet() {
return true;
}
bool isOptimalRequirementsMet() {
return true;
}
void ctxAllowHelpers(OpaqueContext* ptr, bool reallyAllow) {
ptr->allowHelpers(reallyAllow);
}
void ctxSetExecutionMode(OpaqueContext* ptr, int execMode) {
if (execMode < 0 || execMode > 2)
execMode = 0;
ptr->setExecutionMode((samediff::ExecutionMode) execMode);
}
OpaqueDataBuffer* dbCreateExternalDataBuffer(Nd4jLong elements, int dataType, Nd4jPointer primary, Nd4jPointer special) {
auto buffer = dbAllocateDataBuffer(0, dataType, false);
if (primary != nullptr)
buffer->setPrimary(primary, elements);
if (special != nullptr)
buffer->setSpecial(special, elements);
return buffer;
}
OpaqueDataBuffer* dbAllocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
return allocateDataBuffer(elements, dataType, allocateBoth);
}
OpaqueDataBuffer* allocateDataBuffer(Nd4jLong elements, int dataType, bool allocateBoth) {
try {
auto dtype = DataTypeUtils::fromInt(dataType);
return new sd::InteropDataBuffer(elements * DataTypeUtils::sizeOf(dtype), dtype, allocateBoth);
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
return nullptr;
}
}
Nd4jPointer dbPrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->primary();
}
Nd4jPointer dbSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->special();
}
void deleteDataBuffer(OpaqueDataBuffer *dataBuffer) {
delete dataBuffer;
}
void dbSetPrimaryBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer primaryBuffer, Nd4jLong numBytes) {
dataBuffer->setPrimary(primaryBuffer, numBytes);
}
void dbSetSpecialBuffer(OpaqueDataBuffer *dataBuffer, Nd4jPointer specialBuffer, Nd4jLong numBytes) {
dataBuffer->setSpecial(specialBuffer, numBytes);
}
void dbAllocatePrimaryBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocatePrimary();
}
void dbAllocateSpecialBuffer(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->allocateSpecial();
}
void dbExpandBuffer(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
try {
dataBuffer->dataBuffer()->expand(elements * DataTypeUtils::sizeOf(dataBuffer->dataBuffer()->getDataType()));
} catch (std::exception &e) {
sd::LaunchContext::defaultContext()->errorReference()->setErrorCode(1);
sd::LaunchContext::defaultContext()->errorReference()->setErrorMessage(e.what());
}
}
OpaqueDataBuffer* dbCreateView(OpaqueDataBuffer *dataBuffer, Nd4jLong length, Nd4jLong offset) {
return new InteropDataBuffer(*dataBuffer, length, offset);
}
void dbSyncToSpecial(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToSpecial();
}
void dbSyncToPrimary(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->syncToPrimary(nullptr);
}
void dbTickHostRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readPrimary();
}
void dbTickHostWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writePrimary();
}
void dbTickDeviceRead(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->readSpecial();
}
void dbTickDeviceWrite(OpaqueDataBuffer *dataBuffer) {
dataBuffer->dataBuffer()->writeSpecial();
}
void dbExpand(OpaqueDataBuffer *dataBuffer, Nd4jLong elements) {
dataBuffer->expand(elements);
}
void dbClose(OpaqueDataBuffer *dataBuffer) {
dataBuffer->getDataBuffer()->close();
}
int dbDeviceId(OpaqueDataBuffer *dataBuffer) {
return dataBuffer->deviceId();
}
void dbSetDeviceId(OpaqueDataBuffer *dataBuffer, int deviceId) {
dataBuffer->setDeviceId(deviceId);
}
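// reports where the most recent data resides: 0 - host and device copies are both current,
// -1 - only the host (primary) copy is current, 1 - only the device (special) copy is current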
int dbLocality(OpaqueDataBuffer *dataBuffer) {
auto p = dataBuffer->dataBuffer()->isPrimaryActual();
auto d = dataBuffer->dataBuffer()->isSpecialActual();
if (p && d)
return 0;
else if (p)
return -1;
else
return 1;
}
|
64e868ea32e42809cec367fee67564d7ded45725.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include "repeat.h"
const int page_size = 4; // Scale stride and arrays by page size.
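// global_latency measures average global-memory load latency with a dependent pointer chase:
// each timed iteration issues 256 dependent loads (repeat256) bracketed by clock(), and the
// ignore_iterations warm-up passes (k < 0) are excluded from sum_time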
__global__ void global_latency (unsigned long ** my_array, int array_length, int iterations, int ignore_iterations, unsigned long long * duration) {
unsigned long start_time, end_time;
unsigned long *j = (unsigned long*)my_array;
volatile unsigned long long sum_time;
sum_time = 0;
duration[0] = 0;
for (int k = -ignore_iterations; k < iterations; k++) {
if (k==0) {
sum_time = 0; // ignore some iterations: cold icache misses
}
start_time = clock();
repeat256(j=*(unsigned long **)j;)
end_time = clock();
sum_time += (end_time - start_time);
}
((unsigned long*)my_array)[array_length] = (unsigned long)j;
((unsigned long*)my_array)[array_length+1] = (unsigned long) sum_time;
duration[0] = sum_time;
}
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
/* Construct an array of N unsigned longs, with array elements initialized
so kernel will make stride accesses to the array. Then launch kernel
10 times, each making iterations*256 global memory accesses. */
void parametric_measure_global(int N, int iterations, int ignore_iterations, int stride) {
int i;
unsigned long * h_a;
unsigned long ** d_a;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 241600000) { printf ("OOM.\n"); return; }
/* allocate arrays on CPU */
h_a = (unsigned long *)malloc(sizeof(unsigned long) * (N+2));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long) * (N+2));
hipMalloc ((void **) &duration, sizeof(unsigned long long));
/* initialize array elements on CPU with pointers into d_a. */
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
h_a[i] = ((unsigned long)(uintptr_t)d_a) + ((i + stride) % N)*sizeof(unsigned long);
}
h_a[N] = 0;
h_a[N+1] = 0;
hipDeviceSynchronize ();
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long) * N, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
/* Launch a multiple of 10 iterations of the same kernel and take the average to eliminate interconnect (TPCs) effects */
for (int l=0; l <10; l++) {
/* launch kernel*/
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
// printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
hipLaunchKernelGGL(( global_latency) , dim3(Dg), dim3(Db), 0, 0, d_a, N, iterations, ignore_iterations, duration);
hipDeviceSynchronize ();
hipError_t error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipDeviceSynchronize ();
//hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long) * (N+2), hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
hipFree(d_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(latency);
printf("%f\n", (double)(latency_sum/(10*256.0*iterations)) );
}
/* Test page size. Construct an access pattern of N elements spaced stride apart,
followed by a gap of stride+offset, followed by N more elements spaced stride
apart. */
void measure_pagesize(int N, int stride, int offset) {
unsigned long ** h_a;
unsigned long ** d_a;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
const int size = N * stride * 2 + offset + stride*2;
const int iterations = 20;
// Don't die if too much memory was requested.
if (size > 241600000) { printf ("OOM.\n"); return; }
/* allocate array on CPU */
h_a = (unsigned long **)malloc(8 * size);
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
/* allocate array on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long) * size);
hipMalloc ((void **) &duration, sizeof(unsigned long long));
/* initialize array elements on CPU */
for (int i=0;i<N; i++)
((unsigned long *)h_a)[i*stride] = ((i*stride + stride)*8) + (uintptr_t) d_a;
((unsigned long *)h_a)[(N-1)*stride] = ((N*stride + offset)*8) + (uintptr_t) d_a; //point last element to stride+offset
for (int i=0;i<N; i++)
((unsigned long *)h_a)[(i+N)*stride+offset] = (((i+N)*stride + offset + stride)*8) + (uintptr_t) d_a;
((unsigned long *)h_a)[(2*N-1)*stride+offset] = (uintptr_t) d_a; //wrap around.
hipDeviceSynchronize ();
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long) * size, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
for (int l=0; l < 10 ; l++) {
/* launch kernel*/
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
hipLaunchKernelGGL(( global_latency) , dim3(Dg), dim3(Db), 0, 0, d_a, N, iterations, 1, duration);
hipDeviceSynchronize ();
hipError_t error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipDeviceSynchronize ();
//hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
hipFree(d_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(latency);
printf("%f\n", (double)(latency_sum/(10.0*256*iterations)));
}
void measure_global1() {
// we will measure latency of global memory
// One thread that accesses an array.
// loads are dependent on the previously loaded values
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N=256; // 131072;
iterations = 4;
stride_upper_bound = N;
for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
printf (" %5d, ", stride*8);
parametric_measure_global(N, iterations, 1, stride);
}
}
void measure_global5() {
int N, iterations, stride;
// initialize upper bounds here
printf("\nGlobal5: Global memory latency for %d KB stride.\n", 128 * page_size/4);
printf(" Array size (KB), latency (clocks)\n");
iterations = 1;
stride = 128 * 1024 / 8;
for (N = (1*128*1024); N <= (16*1024*1024); N += stride) {
printf (" %5d, ", N*8/1024 * page_size/4);
parametric_measure_global(N*page_size/4, iterations, 1, stride *page_size/4);
}
}
void measure_global_dibs() {
int N, iterations, stride;
// initialize upper bounds here
printf("\nGlobalDibs: Global memory latency for %d KB stride.\n", 512 * page_size/4);
printf(" Array size (KB), latency (clocks)\n");
iterations = 1;
stride = 4 * 1024 / 8;
for (N = (1*1024); N <= (2*1024*1024); N += stride) {
printf (" %5d, ", N*8/1024 * page_size/4);
parametric_measure_global(N*page_size/4, iterations, 1, stride *page_size/4);
}
}
void measure_global6() {
int N, stride, entries;
printf("\nGlobal6: Testing associativity of L1 TLB.\n");
printf(" entries, array size (KB), stride (KB), latency\n");
for (entries = 16; entries <= 128; entries++) {
for (stride = 1; stride <= (4*1024*1024); stride *= 2 ) {
for (int substride = 1; substride < 16; substride *= 2 ) {
int stride2 = stride * sqrt(sqrt(substride)) + 0.5;
N = entries * stride2;
printf (" %d, %7.2f, %7f, ", entries, N*8/1024.0*page_size/4, stride2*8/1024.0*page_size/4);
parametric_measure_global(N*page_size/4, 4, 1, stride2*page_size/4);
}
}
}
}
void measure_global4() //TODO
{
printf ("\nGlobal4: Measuring L2 TLB page size using %d MB stride\n", 2 * page_size/4);
printf (" offset (bytes), latency (clocks)\n");
// Small offsets (approx. page size) are interesting. Search much bigger offsets to
// ensure nothing else interesting happens.
for (int offset = -8192/8; offset <= (2097152+1536)/8; offset += (offset < 1536) ? 128/8 : 4096/8)
{
printf (" %d, ", offset*8 *page_size/4);
measure_pagesize(10, 2097152/8 *page_size/4, offset* page_size/4);
}
}
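// ptw_thread_kernel stresses the page-table walkers: each thread chases pointers through its own
// region of gpuArr (offset by threadIdx.x * N bytes) so the walks proceed concurrently, while
// thread 0 first writes over a 512 MB array, presumably to evict cached translations before timing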
__global__ void ptw_thread_kernel(unsigned long ** gpuArr, unsigned long **largeGPUArr, unsigned long gpuArrSize,
int iterations, int ignore_iterations, unsigned long long * duration,
int numAccess, int numThreads, int N) {
unsigned long start_time, end_time;
unsigned long *j = (unsigned long*)(gpuArr+(threadIdx.x*N/sizeof(unsigned long)));
volatile unsigned long long sum_time;
sum_time = 0;
duration[0] = 0;
if (threadIdx.x == 0) {
for (int i = 0; i<512*1024*1024/8; i++) {
largeGPUArr[i] = (unsigned long *)(i + 1); // dummy write just to touch the whole array
}
}
__syncthreads();
for (int k = -ignore_iterations; k < iterations; k++) {
if (k==0) {
sum_time = 0; // ignore some iterations: cold icache misses
}
// Do our striding
//printf("Thread id: %d\n", threadIdx.x);
start_time = clock();
//printf("Thread id 334: %d\n", threadIdx.x);
repeat256(j=*(unsigned long **)j;__syncthreads();)
//printf("Thread id 336: %d\n", threadIdx.x);
end_time = clock();
//printf("Thread id 338: %d\n", threadIdx.x);
sum_time += (end_time - start_time);
//printf("Time: %lld Thread ID: %d\n", sum_time, threadIdx.x);
}
((unsigned long*)gpuArr)[gpuArrSize + threadIdx.x] = (unsigned long)j;
((unsigned long*)gpuArr)[gpuArrSize+ numThreads + threadIdx.x] = (unsigned long) sum_time;
if (threadIdx.x == numThreads-1) {
duration[0] = sum_time;
}
}
void measure_ptw_thread(unsigned long numThreads) {
// printf("\n Measuring # of PTW Threads with %d threads used...\n", numThreads);
unsigned long start_time, end_time;
unsigned long *cpuArr;
unsigned long **gpuArr;
unsigned long N = 128*1024;
unsigned long numAccess = 256; // accesses per thread,
unsigned long totalMem = N * numThreads * numAccess;
unsigned long *largeCPUArr;
unsigned long **largeGPUArr;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
hipMalloc ((void **) &duration, sizeof(unsigned long long));
// malloc for cpu array
cpuArr = (unsigned long *)malloc(totalMem);
largeCPUArr = (unsigned long *)malloc(512*1024*1024);
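// The extra 2*numThreads elements (plus a few slack slots) appended past totalMem receive each
// thread's final chase pointer and its summed clock count, written at the end of ptw_thread_kernel.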
hipMalloc ((void **) &gpuArr, totalMem + sizeof(unsigned long) * (numThreads * 2 + 5)); // 5 because we don't trust ourselves
hipMalloc ((void **) &largeGPUArr, 512*1024*1024);
for (long i = 0; i < totalMem/(sizeof(unsigned long)); i += N/(sizeof(unsigned long))) {
// Device pointers are 64-bit on what we are using.
cpuArr[i] = ((unsigned long)(uintptr_t)gpuArr) + ((i + (numThreads * N/sizeof(unsigned long)))%N * sizeof(unsigned long));
}
for (long i = 0; i < 512*1024*1024/8; i++) {
largeCPUArr[i] = i;
}
hipDeviceSynchronize ();
/* copy array elements from CPU to GPU */
hipMemcpy((void *)gpuArr, (void *)cpuArr, totalMem, hipMemcpyHostToDevice);
hipMemcpy((void *)largeGPUArr, (void *)largeCPUArr, 512*1024*1024, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
// h_a[N] = 0; we don't need this
// h_a[N+1] = 0;
for (int l=0; l <10; l++) {
/* launch kernel*/
dim3 Db = dim3(numThreads);
dim3 Dg = dim3(1);
// Pray and launch our kernel
hipLaunchKernelGGL(( ptw_thread_kernel) , dim3(Dg), dim3(Db), 0, 0, gpuArr, largeGPUArr, totalMem/sizeof(unsigned long), 1, 0, duration, numAccess, numThreads, N); //don't ignore the first iteration
hipDeviceSynchronize ();
hipError_t error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error is %s\n", hipGetErrorString(error_id));
}
hipDeviceSynchronize ();
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
hipFree(gpuArr);
hipFree(largeGPUArr);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(cpuArr);
free(largeCPUArr);
free(latency);
printf("%lu,%f\n", numThreads, (double)(latency_sum/(10*256.0)) );
}
int main() {
printf("Assuming page size is %d KB\n", page_size);
// printf("%d\n", sizeof(long));
// printf("%d\n", sizeof(long long));
// measure_global_dibs();
// measure_global1();
// measure_global4();
// measure_global5();
// measure_global6();
for (unsigned long i = 1; i<=64; i++) {
measure_ptw_thread(i);
}
return 0;
}
|
64e868ea32e42809cec367fee67564d7ded45725.cu
|
#include <stdio.h>
#include <stdint.h>
#include "repeat.h"
const int page_size = 4; // Scale stride and arrays by page size.
__global__ void global_latency (unsigned long ** my_array, int array_length, int iterations, int ignore_iterations, unsigned long long * duration) {
unsigned long start_time, end_time;
unsigned long *j = (unsigned long*)my_array;
volatile unsigned long long sum_time;
sum_time = 0;
duration[0] = 0;
for (int k = -ignore_iterations; k < iterations; k++) {
if (k==0) {
sum_time = 0; // ignore some iterations: cold icache misses
}
start_time = clock();
repeat256(j=*(unsigned long **)j;)
end_time = clock();
sum_time += (end_time - start_time);
}
((unsigned long*)my_array)[array_length] = (unsigned long)j;
((unsigned long*)my_array)[array_length+1] = (unsigned long) sum_time;
duration[0] = sum_time;
}
int gcf(int a, int b)
{
if (a == 0) return b;
return gcf(b % a, a);
}
/* Construct an array of N unsigned longs, with array elements initialized
so kernel will make stride accesses to the array. Then launch kernel
10 times, each making iterations*256 global memory accesses. */
void parametric_measure_global(int N, int iterations, int ignore_iterations, int stride) {
int i;
unsigned long * h_a;
unsigned long ** d_a;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
// Don't die if too much memory was requested.
if (N > 241600000) { printf ("OOM.\n"); return; }
/* allocate arrays on CPU */
h_a = (unsigned long *)malloc(sizeof(unsigned long) * (N+2));
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long) * (N+2));
cudaMalloc ((void **) &duration, sizeof(unsigned long long));
/* initialize array elements on CPU with pointers into d_a. */
int step = gcf (stride, N); // Optimization: Initialize fewer elements.
for (i = 0; i < N; i += step) {
// Device pointers are 32-bit on GT200.
h_a[i] = ((unsigned long)(uintptr_t)d_a) + ((i + stride) % N)*sizeof(unsigned long);
}
h_a[N] = 0;
h_a[N+1] = 0;
cudaThreadSynchronize ();
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long) * N, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
/* Launch a multiple of 10 iterations of the same kernel and take the average to eliminate interconnect (TPCs) effects */
for (int l=0; l <10; l++) {
/* launch kernel*/
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
// printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
global_latency <<<Dg, Db>>>(d_a, N, iterations, ignore_iterations, duration);
cudaThreadSynchronize ();
cudaError_t error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaThreadSynchronize ();
//cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long) * (N+2), cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
cudaFree(d_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(latency);
printf("%f\n", (double)(latency_sum/(10*256.0*iterations)) );
}
/* Test page size. Construct an access pattern of N elements spaced stride apart,
followed by a gap of stride+offset, followed by N more elements spaced stride
apart. */
void measure_pagesize(int N, int stride, int offset) {
unsigned long ** h_a;
unsigned long ** d_a;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
const int size = N * stride * 2 + offset + stride*2;
const int iterations = 20;
// Don't die if too much memory was requested.
if (size > 241600000) { printf ("OOM.\n"); return; }
/* allocate array on CPU */
h_a = (unsigned long **)malloc(8 * size);
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
/* allocate array on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long) * size);
cudaMalloc ((void **) &duration, sizeof(unsigned long long));
/* initialize array elements on CPU */
for (int i=0;i<N; i++)
((unsigned long *)h_a)[i*stride] = ((i*stride + stride)*8) + (uintptr_t) d_a;
((unsigned long *)h_a)[(N-1)*stride] = ((N*stride + offset)*8) + (uintptr_t) d_a; //point last element to stride+offset
for (int i=0;i<N; i++)
((unsigned long *)h_a)[(i+N)*stride+offset] = (((i+N)*stride + offset + stride)*8) + (uintptr_t) d_a;
((unsigned long *)h_a)[(2*N-1)*stride+offset] = (uintptr_t) d_a; //wrap around.
cudaThreadSynchronize ();
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long) * size, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
for (int l=0; l < 10 ; l++) {
/* launch kernel*/
dim3 Db = dim3(1);
dim3 Dg = dim3(1,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
global_latency <<<Dg, Db>>>(d_a, N, iterations, 1, duration);
cudaThreadSynchronize ();
cudaError_t error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaThreadSynchronize ();
//cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
cudaFree(d_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(latency);
printf("%f\n", (double)(latency_sum/(10.0*256*iterations)));
}
void measure_global1() {
// we will measure latency of global memory
// One thread that accesses an array.
// loads are dependent on the previously loaded values
int N, iterations, stride;
// initialize upper bounds here
int stride_upper_bound;
printf("Global1: Global memory latency for 1 KB array and varying strides.\n");
printf(" stride (bytes), latency (clocks)\n");
N=256; // 131072;
iterations = 4;
stride_upper_bound = N;
for (stride = 1; stride <= (stride_upper_bound) ; stride+=1) {
printf (" %5d, ", stride*8);
parametric_measure_global(N, iterations, 1, stride);
}
}
void measure_global5() {
int N, iterations, stride;
// initialize upper bounds here
printf("\nGlobal5: Global memory latency for %d KB stride.\n", 128 * page_size/4);
printf(" Array size (KB), latency (clocks)\n");
iterations = 1;
stride = 128 * 1024 / 8;
for (N = (1*128*1024); N <= (16*1024*1024); N += stride) {
printf (" %5d, ", N*8/1024 * page_size/4);
parametric_measure_global(N*page_size/4, iterations, 1, stride *page_size/4);
}
}
void measure_global_dibs() {
int N, iterations, stride;
// initialize upper bounds here
printf("\nGlobalDibs: Global memory latency for %d KB stride.\n", 512 * page_size/4);
printf(" Array size (KB), latency (clocks)\n");
iterations = 1;
stride = 4 * 1024 / 8;
for (N = (1*1024); N <= (2*1024*1024); N += stride) {
printf (" %5d, ", N*8/1024 * page_size/4);
parametric_measure_global(N*page_size/4, iterations, 1, stride *page_size/4);
}
}
void measure_global6() {
int N, stride, entries;
printf("\nGlobal6: Testing associativity of L1 TLB.\n");
printf(" entries, array size (KB), stride (KB), latency\n");
for (entries = 16; entries <= 128; entries++) {
for (stride = 1; stride <= (4*1024*1024); stride *= 2 ) {
for (int substride = 1; substride < 16; substride *= 2 ) {
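// substride^(1/4) gives factors of roughly 1, 1.19, 1.41 and 1.68, so stride2 sweeps four
// intermediate strides between successive powers of two rather than only doubling.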
int stride2 = stride * sqrt(sqrt(substride)) + 0.5;
N = entries * stride2;
printf (" %d, %7.2f, %7f, ", entries, N*8/1024.0*page_size/4, stride2*8/1024.0*page_size/4);
parametric_measure_global(N*page_size/4, 4, 1, stride2*page_size/4);
}
}
}
}
void measure_global4() //TODO
{
printf ("\nGlobal4: Measuring L2 TLB page size using %d MB stride\n", 2 * page_size/4);
printf (" offset (bytes), latency (clocks)\n");
// Small offsets (approx. page size) are interesting. Search much bigger offsets to
// ensure nothing else interesting happens.
for (int offset = -8192/8; offset <= (2097152+1536)/8; offset += (offset < 1536) ? 128/8 : 4096/8)
{
printf (" %d, ", offset*8 *page_size/4);
measure_pagesize(10, 2097152/8 *page_size/4, offset* page_size/4);
}
}
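// Pointer-chasing kernel behind measure_ptw_thread(): every thread walks its own chain of
// dependent loads (repeat256, with a __syncthreads() between hops) and accumulates the cycle
// count. Thread 0 first scribbles over a large scratch buffer, presumably to evict cached
// address translations so the timed loads exercise the page-table walkers (PTW).
// Each thread's final pointer and summed time are stored just past the pointer-chase region of gpuArr.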
__global__ void ptw_thread_kernel(unsigned long ** gpuArr, unsigned long **largeGPUArr, unsigned long gpuArrSize,
int iterations, int ignore_iterations, unsigned long long * duration,
int numAccess, int numThreads, int N) {
unsigned long start_time, end_time;
unsigned long *j = (unsigned long*)(gpuArr+(threadIdx.x*N/sizeof(unsigned long)));
volatile unsigned long long sum_time;
sum_time = 0;
duration[0] = 0;
if (threadIdx.x == 0) {
for (int i = 0; i<512*1024*1024/8; i++) {
largeGPUArr[i] = (unsigned long *) i+1; // dummy fill: the stored values are never read back, this just touches the whole buffer, presumably to evict cached translations before the timed chase
}
}
__syncthreads();
for (int k = -ignore_iterations; k < iterations; k++) {
if (k==0) {
sum_time = 0; // ignore some iterations: cold icache misses
}
// Do our striding
//printf("Thread id: %d\n", threadIdx.x);
start_time = clock();
//printf("Thread id 334: %d\n", threadIdx.x);
repeat256(j=*(unsigned long **)j;__syncthreads();)
//printf("Thread id 336: %d\n", threadIdx.x);
end_time = clock();
//printf("Thread id 338: %d\n", threadIdx.x);
sum_time += (end_time - start_time);
//printf("Time: %lld Thread ID: %d\n", sum_time, threadIdx.x);
}
((unsigned long*)gpuArr)[gpuArrSize + threadIdx.x] = (unsigned long)j;
((unsigned long*)gpuArr)[gpuArrSize+ numThreads + threadIdx.x] = (unsigned long) sum_time;
if (threadIdx.x == numThreads-1) {
duration[0] = sum_time;
}
}
void measure_ptw_thread(unsigned long numThreads) {
// printf("\n Measuring # of PTW Threads with %d threads used...\n", numThreads);
unsigned long start_time, end_time;
unsigned long *cpuArr;
unsigned long **gpuArr;
unsigned long N = 128*1024;
unsigned long numAccess = 256; // accesses per thread,
unsigned long totalMem = N * numThreads * numAccess;
unsigned long *largeCPUArr;
unsigned long **largeGPUArr;
unsigned long long * duration;
unsigned long long * latency;
unsigned long long latency_sum = 0;
latency = (unsigned long long *)malloc(sizeof(unsigned long long));
cudaMalloc ((void **) &duration, sizeof(unsigned long long));
// malloc for cpu array
cpuArr = (unsigned long *)malloc(totalMem);
largeCPUArr = (unsigned long *)malloc(512*1024*1024);
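// The extra 2*numThreads elements (plus a few slack slots) appended past totalMem receive each
// thread's final chase pointer and its summed clock count, written at the end of ptw_thread_kernel.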
cudaMalloc ((void **) &gpuArr, totalMem + sizeof(unsigned long) * (numThreads * 2 + 5)); // 5 because we don't trust ourselves
cudaMalloc ((void **) &largeGPUArr, 512*1024*1024);
for (long i = 0; i < totalMem/(sizeof(unsigned long)); i += N/(sizeof(unsigned long))) {
// Device pointers are 64-bit on what we are using.
cpuArr[i] = ((unsigned long)(uintptr_t)gpuArr) + ((i + (numThreads * N/sizeof(unsigned long)))%N * sizeof(unsigned long));
}
for (long i = 0; i < 512*1024*1024/8; i++) {
largeCPUArr[i] = i;
}
cudaThreadSynchronize ();
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)gpuArr, (void *)cpuArr, totalMem, cudaMemcpyHostToDevice);
cudaMemcpy((void *)largeGPUArr, (void *)largeCPUArr, 512*1024*1024, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
// h_a[N] = 0; we don't need this
// h_a[N+1] = 0;
for (int l=0; l <10; l++) {
/* launch kernel*/
dim3 Db = dim3(numThreads);
dim3 Dg = dim3(1);
// Pray and launch our kernel
ptw_thread_kernel <<<Dg, Db>>>(gpuArr, largeGPUArr, totalMem/sizeof(unsigned long), 1, 0, duration, numAccess, numThreads, N); //don't ignore the first iteration
cudaThreadSynchronize ();
cudaError_t error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error is %s\n", cudaGetErrorString(error_id));
}
cudaThreadSynchronize ();
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long), cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
latency_sum+=latency[0];
}
/* free memory on GPU */
cudaFree(gpuArr);
cudaFree(largeGPUArr);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(cpuArr);
free(largeCPUArr);
free(latency);
printf("%lu,%f\n", numThreads, (double)(latency_sum/(10*256.0)) );
}
int main() {
printf("Assuming page size is %d KB\n", page_size);
// printf("%d\n", sizeof(long));
// printf("%d\n", sizeof(long long));
// measure_global_dibs();
// measure_global1();
// measure_global4();
// measure_global5();
// measure_global6();
for (unsigned long i = 1; i<=64; i++) {
measure_ptw_thread(i);
}
return 0;
}
|
3aa39ad95c7f3acc9fc873939a16ef30a0298b82.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <gdf/gdf.h>
#include <gdf/ipc/Schema_generated.h>
#include <gdf/ipc/Message_generated.h>
#include <arrow/api.h>
#include <arrow/io/api.h>
#include <arrow/ipc/api.h>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <memory>
#include <vector>
#include <string>
using namespace org::apache::arrow;
namespace {
using namespace arrow;
#if ARROW_VERSION < 800
static std::string GetBufferTypeName(BufferType type) {
switch (type) {
case BufferType::DATA:
return "DATA";
case BufferType::OFFSET:
return "OFFSET";
case BufferType::TYPE:
return "TYPE";
case BufferType::VALIDITY:
return "VALIDITY";
default:
break;
}
return "UNKNOWN";
}
#endif
static std::string GetTypeName(Type::type id) {
switch (id) {
#define SHOW_TYPE_NAME(K) case Type::K: return #K;
SHOW_TYPE_NAME(NA)
SHOW_TYPE_NAME(BOOL)
SHOW_TYPE_NAME(UINT8)
SHOW_TYPE_NAME(INT8)
SHOW_TYPE_NAME(UINT16)
SHOW_TYPE_NAME(INT16)
SHOW_TYPE_NAME(UINT32)
SHOW_TYPE_NAME(INT32)
SHOW_TYPE_NAME(UINT64)
SHOW_TYPE_NAME(INT64)
SHOW_TYPE_NAME(HALF_FLOAT)
SHOW_TYPE_NAME(FLOAT)
SHOW_TYPE_NAME(DOUBLE)
SHOW_TYPE_NAME(STRING)
SHOW_TYPE_NAME(BINARY)
SHOW_TYPE_NAME(FIXED_SIZE_BINARY)
SHOW_TYPE_NAME(DATE32)
SHOW_TYPE_NAME(DATE64)
SHOW_TYPE_NAME(TIMESTAMP)
SHOW_TYPE_NAME(TIME32)
SHOW_TYPE_NAME(TIME64)
SHOW_TYPE_NAME(INTERVAL)
SHOW_TYPE_NAME(DECIMAL)
SHOW_TYPE_NAME(LIST)
SHOW_TYPE_NAME(STRUCT)
SHOW_TYPE_NAME(UNION)
SHOW_TYPE_NAME(DICTIONARY)
SHOW_TYPE_NAME(MAP)
#undef SHOW_TYPE_NAME
}
return "UNKNOWN";
}
}
class IpcParser {
public:
typedef std::unique_ptr<const char []> unique_bytes_type;
class ParseError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
struct MessageInfo {
const void *header;
int64_t body_length;
flatbuf::MessageHeader type;
flatbuf::MetadataVersion version;
};
struct LayoutDesc {
int bitwidth;
std::string vectortype;
};
struct FieldDesc {
std::string name;
std::string type;
std::vector<LayoutDesc> layouts;
};
struct BufferDesc {
int64_t offset, length;
};
struct DTypeDesc {
std::string name;
int bitwidth;
};
struct NodeDesc {
std::string name;
int64_t length;
int64_t null_count;
BufferDesc null_buffer, data_buffer;
DTypeDesc dtype;
};
IpcParser()
:_d_buffer(nullptr), _d_curptr(nullptr), _d_data_body(nullptr), _failed(false)
{ /* empty */ }
void open(const uint8_t *schema, size_t length) {
try {
read_schema(schema, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
void open_recordbatches(const uint8_t *recordbatches, size_t length) {
try {
read_record_batch(recordbatches, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
bool is_failed() const {
return _failed;
}
const std::string& get_error() const {
return _error_message;
}
/*
* Returns the GPU pointer to the start of the data region.
*/
const void* get_data() const {
return static_cast<const void*>(_d_data_body);
}
int64_t get_data_offset() const {
return _d_data_body - _d_buffer;
}
/*
* Returns the layout information in json.
* The json contains a list metadata for each column.
*/
const std::string& get_layout_json() {
if ( _json_output.size() == 0 ) {
std::ostringstream oss;
oss << "[";
int ct = 0;
for (auto i=_nodes.begin(); i!=_nodes.end(); ++i, ++ct) {
if ( ct > 0 ) {
oss << ", ";
}
jsonify_node(oss, *i);
}
oss << "]";
_json_output = oss.str();
}
return _json_output;
}
const std::string& get_schema_json() {
if ( _json_schema_output.size() == 0 ) {
// To JSON
#if ARROW_VERSION < 800
std::unique_ptr<arrow::ipc::JsonWriter> json_writer;
arrow::ipc::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
#else
std::unique_ptr<arrow::ipc::internal::json::JsonWriter> json_writer;
arrow::ipc::internal::json::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
#endif
}
return _json_schema_output;
}
protected:
void jsonify_node(std::ostream &os, const NodeDesc &node) {
os << "{";
os << "\"name\": " << '"' << node.name << '"';
os << ", ";
os << "\"length\": " << node.length;
os << ", ";
os << "\"null_count\": " << node.null_count;
os << ", ";
os << "\"dtype\": ";
jsonify_dtype(os, node.dtype);
os << ", ";
os << "\"data_buffer\": ";
jsonify_buffer(os, node.data_buffer);
os << ", ";
os << "\"null_buffer\": ";
jsonify_buffer(os, node.null_buffer);
os << "}";
}
void jsonify_dtype(std::ostream &os, const DTypeDesc &dtype) {
os << "{";
os << "\"name\": " << '"' << dtype.name << '"';
os << ", ";
os << "\"bitwidth\": " << dtype.bitwidth;
os << "}";
}
void jsonify_buffer(std::ostream &os, const BufferDesc &buffer) {
os << "{";
os << "\"length\": " << buffer.length;
os << ", ";
os << "\"offset\": " << buffer.offset;
os << "}";
}
void read_schema(const uint8_t *schema_buf, size_t length) {
if (_fields.size() || _nodes.size()) {
throw ParseError("cannot open more than once");
}
// Use Arrow to load the schema
const auto payload = std::make_shared<arrow::Buffer>(schema_buf, length);
auto buffer = std::make_shared<io::BufferReader>(payload);
#if ARROW_VERSION < 800
std::shared_ptr<ipc::RecordBatchStreamReader> reader;
#else
std::shared_ptr<ipc::RecordBatchReader> reader;
#endif
auto status = ipc::RecordBatchStreamReader::Open(buffer, &reader);
if ( !status.ok() ) {
throw ParseError(status.message());
}
_schema = reader->schema();
if (!_schema) throw ParseError("failed to parse schema");
// Parse the schema
parse_schema(_schema);
}
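// Note: 'recordbatches' points into GPU memory. Only the flatbuffer message header is copied
// back to the host (via read_bytes); the record-batch body itself stays on the device and is
// later exposed through get_data()/get_data_offset().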
void read_record_batch(const uint8_t *recordbatches, size_t length) {
_d_curptr = _d_buffer = recordbatches;
int size = read_msg_size();
auto header_buf = read_bytes(size);
auto header = parse_msg_header(header_buf);
#if ARROW_VERSION < 800
if ( header.version != flatbuf::MetadataVersion_V3 )
throw ParseError("unsupported metadata version, expected V3 got "\
+ std::string(flatbuf::EnumNameMetadataVersion(header.version)));
#else
if ( header.version != flatbuf::MetadataVersion_V4 )
throw ParseError("unsupported metadata version, expected V4 got "\
+ std::string(flatbuf::EnumNameMetadataVersion(header.version)));
#endif
if ( header.body_length <= 0) {
throw ParseError("recordbatch should have a body");
}
// store the current ptr as the data ptr
_d_data_body = _d_curptr;
parse_record_batch(header);
}
MessageInfo parse_msg_header(const unique_bytes_type & header_buf) {
auto msg = flatbuf::GetMessage(header_buf.get());
MessageInfo mi;
mi.header = msg->header();
mi.body_length = msg->bodyLength();
mi.type = msg->header_type();
mi.version = msg->version();
return mi;
}
void parse_schema(std::shared_ptr<arrow::Schema> schema) {
auto fields = schema->fields();
_fields.reserve(fields.size());
for (auto field : fields) {
_fields.push_back(FieldDesc());
auto & out_field = _fields.back();
out_field.name = field->name();
out_field.type = GetTypeName(field->type()->id());
#if ARROW_VERSION < 800
auto layouts = field->type()->GetBufferLayout();
for ( int j=0; j < layouts.size(); ++j ) {
auto layout = layouts[j];
LayoutDesc layout_desc;
layout_desc.bitwidth = layout.bit_width();
layout_desc.vectortype = GetBufferTypeName(layout.type());
out_field.layouts.push_back(layout_desc);
}
#endif
}
}
void parse_record_batch(MessageInfo msg) {
if ( msg.type != flatbuf::MessageHeader_RecordBatch ) {
throw ParseError("expecting recordbatch type");
}
auto rb = static_cast<const flatbuf::RecordBatch*>(msg.header);
int node_ct = rb->nodes()->Length();
int buffer_ct = rb->buffers()->Length();
int buffer_per_node = 2;
if ( node_ct * buffer_per_node != buffer_ct ) {
throw ParseError("unexpected: more than 2 buffers per node!?");
}
_nodes.reserve(node_ct);
for ( int i=0; i < node_ct; ++i ) {
const auto &fd = _fields[i];
auto node = rb->nodes()->Get(i);
_nodes.push_back(NodeDesc());
auto &out_node = _nodes.back();
for ( int j=0; j < buffer_per_node; ++j ) {
auto buf = rb->buffers()->Get(i * buffer_per_node + j);
#if ARROW_VERSION < 800
if ( buf->page() != -1 ) {
std::cerr << "buf.Page() != -1; metadata format changed!\n";
}
#endif
BufferDesc bufdesc;
bufdesc.offset = buf->offset();
bufdesc.length = buf->length();
#if ARROW_VERSION < 800
const auto &layout = fd.layouts[j];
if ( layout.vectortype == "DATA" ) {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = layout.bitwidth;
} else if ( layout.vectortype == "VALIDITY" ) {
out_node.null_buffer = bufdesc;
} else {
throw ParseError("unsupported vector type");
}
#else
if (j==0) // assuming first buffer is null bitmap
out_node.null_buffer = bufdesc;
else {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = (bufdesc.length / node->length()) * 8;
}
#endif
}
assert(out_node.null_buffer.length <= out_node.data_buffer.length); // check the null bitmap assumption
out_node.name = fd.name;
out_node.length = node->length();
out_node.null_count = node->null_count();
}
}
unique_bytes_type read_bytes(size_t size) {
if (size <= 0) {
throw ParseError("attempt to read zero or negative bytes");
}
char *buf = new char[size];
if (hipSuccess != hipMemcpy(buf, _d_curptr, size,
hipMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += size;
return unique_bytes_type(buf);
}
template<typename T>
void read_value(T &val) {
if (hipSuccess != hipMemcpy(&val, _d_curptr, sizeof(T),
hipMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += sizeof(T);
}
int read_msg_size() {
int size;
read_value(size);
if (size <= 0) {
throw ParseError("non-positive message size");
}
return size;
}
private:
const uint8_t *_d_buffer;
const uint8_t *_d_curptr;
const uint8_t *_d_data_body;
std::shared_ptr<arrow::Schema> _schema;
std::vector<FieldDesc> _fields;
std::vector<NodeDesc> _nodes;
bool _failed;
std::string _error_message;
// cache
std::string _json_output;
std::string _json_schema_output;
};
gdf_ipc_parser_type* cffi_wrap(IpcParser* obj){
return reinterpret_cast<gdf_ipc_parser_type*>(obj);
}
IpcParser* cffi_unwrap(gdf_ipc_parser_type* hdl){
return reinterpret_cast<IpcParser*>(hdl);
}
gdf_ipc_parser_type* gdf_ipc_parser_open(const uint8_t *schema, size_t length) {
IpcParser *parser = new IpcParser;
parser->open(schema, length);
return cffi_wrap(parser);
}
void gdf_ipc_parser_close(gdf_ipc_parser_type *handle) {
delete cffi_unwrap(handle);
}
int gdf_ipc_parser_failed(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->is_failed();
}
const char *gdf_ipc_parser_get_schema_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_schema_json().c_str();
}
const char* gdf_ipc_parser_get_layout_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_layout_json().c_str();
}
const char* gdf_ipc_parser_get_error(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_error().c_str();
}
const void* gdf_ipc_parser_get_data(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data();
}
int64_t gdf_ipc_parser_get_data_offset(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data_offset();
}
void gdf_ipc_parser_open_recordbatches(gdf_ipc_parser_type *handle,
const uint8_t *recordbatches,
size_t length)
{
return cffi_unwrap(handle)->open_recordbatches(recordbatches, length);
}
|
3aa39ad95c7f3acc9fc873939a16ef30a0298b82.cu
|
#include <gdf/gdf.h>
#include <gdf/ipc/Schema_generated.h>
#include <gdf/ipc/Message_generated.h>
#include <arrow/api.h>
#include <arrow/io/api.h>
#include <arrow/ipc/api.h>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <memory>
#include <vector>
#include <string>
using namespace org::apache::arrow;
namespace {
using namespace arrow;
#if ARROW_VERSION < 800
static std::string GetBufferTypeName(BufferType type) {
switch (type) {
case BufferType::DATA:
return "DATA";
case BufferType::OFFSET:
return "OFFSET";
case BufferType::TYPE:
return "TYPE";
case BufferType::VALIDITY:
return "VALIDITY";
default:
break;
}
return "UNKNOWN";
}
#endif
static std::string GetTypeName(Type::type id) {
switch (id) {
#define SHOW_TYPE_NAME(K) case Type::K: return #K;
SHOW_TYPE_NAME(NA)
SHOW_TYPE_NAME(BOOL)
SHOW_TYPE_NAME(UINT8)
SHOW_TYPE_NAME(INT8)
SHOW_TYPE_NAME(UINT16)
SHOW_TYPE_NAME(INT16)
SHOW_TYPE_NAME(UINT32)
SHOW_TYPE_NAME(INT32)
SHOW_TYPE_NAME(UINT64)
SHOW_TYPE_NAME(INT64)
SHOW_TYPE_NAME(HALF_FLOAT)
SHOW_TYPE_NAME(FLOAT)
SHOW_TYPE_NAME(DOUBLE)
SHOW_TYPE_NAME(STRING)
SHOW_TYPE_NAME(BINARY)
SHOW_TYPE_NAME(FIXED_SIZE_BINARY)
SHOW_TYPE_NAME(DATE32)
SHOW_TYPE_NAME(DATE64)
SHOW_TYPE_NAME(TIMESTAMP)
SHOW_TYPE_NAME(TIME32)
SHOW_TYPE_NAME(TIME64)
SHOW_TYPE_NAME(INTERVAL)
SHOW_TYPE_NAME(DECIMAL)
SHOW_TYPE_NAME(LIST)
SHOW_TYPE_NAME(STRUCT)
SHOW_TYPE_NAME(UNION)
SHOW_TYPE_NAME(DICTIONARY)
SHOW_TYPE_NAME(MAP)
#undef SHOW_TYPE_NAME
}
return "UNKNOWN";
}
}
class IpcParser {
public:
typedef std::unique_ptr<const char []> unique_bytes_type;
class ParseError : public std::runtime_error {
using std::runtime_error::runtime_error;
};
struct MessageInfo {
const void *header;
int64_t body_length;
flatbuf::MessageHeader type;
flatbuf::MetadataVersion version;
};
struct LayoutDesc {
int bitwidth;
std::string vectortype;
};
struct FieldDesc {
std::string name;
std::string type;
std::vector<LayoutDesc> layouts;
};
struct BufferDesc {
int64_t offset, length;
};
struct DTypeDesc {
std::string name;
int bitwidth;
};
struct NodeDesc {
std::string name;
int64_t length;
int64_t null_count;
BufferDesc null_buffer, data_buffer;
DTypeDesc dtype;
};
IpcParser()
:_d_buffer(nullptr), _d_curptr(nullptr), _d_data_body(nullptr), _failed(false)
{ /* empty */ }
void open(const uint8_t *schema, size_t length) {
try {
read_schema(schema, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
void open_recordbatches(const uint8_t *recordbatches, size_t length) {
try {
read_record_batch(recordbatches, length);
} catch ( ParseError e ) {
std::ostringstream oss;
oss << "ParseError: " << e.what();
_error_message = oss.str();
_failed = true;
}
}
bool is_failed() const {
return _failed;
}
const std::string& get_error() const {
return _error_message;
}
/*
* Returns the GPU pointer to the start of the data region.
*/
const void* get_data() const {
return static_cast<const void*>(_d_data_body);
}
int64_t get_data_offset() const {
return _d_data_body - _d_buffer;
}
/*
* Returns the layout information in json.
* The json contains a list metadata for each column.
*/
const std::string& get_layout_json() {
if ( _json_output.size() == 0 ) {
std::ostringstream oss;
oss << "[";
int ct = 0;
for (auto i=_nodes.begin(); i!=_nodes.end(); ++i, ++ct) {
if ( ct > 0 ) {
oss << ", ";
}
jsonify_node(oss, *i);
}
oss << "]";
_json_output = oss.str();
}
return _json_output;
}
const std::string& get_schema_json() {
if ( _json_schema_output.size() == 0 ) {
// To JSON
#if ARROW_VERSION < 800
std::unique_ptr<arrow::ipc::JsonWriter> json_writer;
arrow::ipc::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
#else
std::unique_ptr<arrow::ipc::internal::json::JsonWriter> json_writer;
arrow::ipc::internal::json::JsonWriter::Open(_schema, &json_writer);
json_writer->Finish(&_json_schema_output);
#endif
}
return _json_schema_output;
}
protected:
void jsonify_node(std::ostream &os, const NodeDesc &node) {
os << "{";
os << "\"name\": " << '"' << node.name << '"';
os << ", ";
os << "\"length\": " << node.length;
os << ", ";
os << "\"null_count\": " << node.null_count;
os << ", ";
os << "\"dtype\": ";
jsonify_dtype(os, node.dtype);
os << ", ";
os << "\"data_buffer\": ";
jsonify_buffer(os, node.data_buffer);
os << ", ";
os << "\"null_buffer\": ";
jsonify_buffer(os, node.null_buffer);
os << "}";
}
void jsonify_dtype(std::ostream &os, const DTypeDesc &dtype) {
os << "{";
os << "\"name\": " << '"' << dtype.name << '"';
os << ", ";
os << "\"bitwidth\": " << dtype.bitwidth;
os << "}";
}
void jsonify_buffer(std::ostream &os, const BufferDesc &buffer) {
os << "{";
os << "\"length\": " << buffer.length;
os << ", ";
os << "\"offset\": " << buffer.offset;
os << "}";
}
void read_schema(const uint8_t *schema_buf, size_t length) {
if (_fields.size() || _nodes.size()) {
throw ParseError("cannot open more than once");
}
// Use Arrow to load the schema
const auto payload = std::make_shared<arrow::Buffer>(schema_buf, length);
auto buffer = std::make_shared<io::BufferReader>(payload);
#if ARROW_VERSION < 800
std::shared_ptr<ipc::RecordBatchStreamReader> reader;
#else
std::shared_ptr<ipc::RecordBatchReader> reader;
#endif
auto status = ipc::RecordBatchStreamReader::Open(buffer, &reader);
if ( !status.ok() ) {
throw ParseError(status.message());
}
_schema = reader->schema();
if (!_schema) throw ParseError("failed to parse schema");
// Parse the schema
parse_schema(_schema);
}
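// Note: 'recordbatches' points into GPU memory. Only the flatbuffer message header is copied
// back to the host (via read_bytes); the record-batch body itself stays on the device and is
// later exposed through get_data()/get_data_offset().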
void read_record_batch(const uint8_t *recordbatches, size_t length) {
_d_curptr = _d_buffer = recordbatches;
int size = read_msg_size();
auto header_buf = read_bytes(size);
auto header = parse_msg_header(header_buf);
#if ARROW_VERSION < 800
if ( header.version != flatbuf::MetadataVersion_V3 )
throw ParseError("unsupported metadata version, expected V3 got "\
+ std::string(flatbuf::EnumNameMetadataVersion(header.version)));
#else
if ( header.version != flatbuf::MetadataVersion_V4 )
throw ParseError("unsupported metadata version, expected V4 got "\
+ std::string(flatbuf::EnumNameMetadataVersion(header.version)));
#endif
if ( header.body_length <= 0) {
throw ParseError("recordbatch should have a body");
}
// store the current ptr as the data ptr
_d_data_body = _d_curptr;
parse_record_batch(header);
}
MessageInfo parse_msg_header(const unique_bytes_type & header_buf) {
auto msg = flatbuf::GetMessage(header_buf.get());
MessageInfo mi;
mi.header = msg->header();
mi.body_length = msg->bodyLength();
mi.type = msg->header_type();
mi.version = msg->version();
return mi;
}
void parse_schema(std::shared_ptr<arrow::Schema> schema) {
auto fields = schema->fields();
_fields.reserve(fields.size());
for (auto field : fields) {
_fields.push_back(FieldDesc());
auto & out_field = _fields.back();
out_field.name = field->name();
out_field.type = GetTypeName(field->type()->id());
#if ARROW_VERSION < 800
auto layouts = field->type()->GetBufferLayout();
for ( int j=0; j < layouts.size(); ++j ) {
auto layout = layouts[j];
LayoutDesc layout_desc;
layout_desc.bitwidth = layout.bit_width();
layout_desc.vectortype = GetBufferTypeName(layout.type());
out_field.layouts.push_back(layout_desc);
}
#endif
}
}
void parse_record_batch(MessageInfo msg) {
if ( msg.type != flatbuf::MessageHeader_RecordBatch ) {
throw ParseError("expecting recordbatch type");
}
auto rb = static_cast<const flatbuf::RecordBatch*>(msg.header);
int node_ct = rb->nodes()->Length();
int buffer_ct = rb->buffers()->Length();
int buffer_per_node = 2;
if ( node_ct * buffer_per_node != buffer_ct ) {
throw ParseError("unexpected: more than 2 buffers per node!?");
}
_nodes.reserve(node_ct);
for ( int i=0; i < node_ct; ++i ) {
const auto &fd = _fields[i];
auto node = rb->nodes()->Get(i);
_nodes.push_back(NodeDesc());
auto &out_node = _nodes.back();
for ( int j=0; j < buffer_per_node; ++j ) {
auto buf = rb->buffers()->Get(i * buffer_per_node + j);
#if ARROW_VERSION < 800
if ( buf->page() != -1 ) {
std::cerr << "buf.Page() != -1; metadata format changed!\n";
}
#endif
BufferDesc bufdesc;
bufdesc.offset = buf->offset();
bufdesc.length = buf->length();
#if ARROW_VERSION < 800
const auto &layout = fd.layouts[j];
if ( layout.vectortype == "DATA" ) {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = layout.bitwidth;
} else if ( layout.vectortype == "VALIDITY" ) {
out_node.null_buffer = bufdesc;
} else {
throw ParseError("unsupported vector type");
}
#else
if (j==0) // assuming first buffer is null bitmap
out_node.null_buffer = bufdesc;
else {
out_node.data_buffer = bufdesc;
out_node.dtype.name = fd.type;
out_node.dtype.bitwidth = (bufdesc.length / node->length()) * 8;
}
#endif
}
assert(out_node.null_buffer.length <= out_node.data_buffer.length); // check the null bitmap assumption
out_node.name = fd.name;
out_node.length = node->length();
out_node.null_count = node->null_count();
}
}
unique_bytes_type read_bytes(size_t size) {
if (size <= 0) {
throw ParseError("attempt to read zero or negative bytes");
}
char *buf = new char[size];
if (cudaSuccess != cudaMemcpy(buf, _d_curptr, size,
cudaMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += size;
return unique_bytes_type(buf);
}
template<typename T>
void read_value(T &val) {
if (cudaSuccess != cudaMemcpy(&val, _d_curptr, sizeof(T),
cudaMemcpyDeviceToHost) )
throw ParseError("cannot read value");
_d_curptr += sizeof(T);
}
int read_msg_size() {
int size;
read_value(size);
if (size <= 0) {
throw ParseError("non-positive message size");
}
return size;
}
private:
const uint8_t *_d_buffer;
const uint8_t *_d_curptr;
const uint8_t *_d_data_body;
std::shared_ptr<arrow::Schema> _schema;
std::vector<FieldDesc> _fields;
std::vector<NodeDesc> _nodes;
bool _failed;
std::string _error_message;
// cache
std::string _json_output;
std::string _json_schema_output;
};
gdf_ipc_parser_type* cffi_wrap(IpcParser* obj){
return reinterpret_cast<gdf_ipc_parser_type*>(obj);
}
IpcParser* cffi_unwrap(gdf_ipc_parser_type* hdl){
return reinterpret_cast<IpcParser*>(hdl);
}
gdf_ipc_parser_type* gdf_ipc_parser_open(const uint8_t *schema, size_t length) {
IpcParser *parser = new IpcParser;
parser->open(schema, length);
return cffi_wrap(parser);
}
void gdf_ipc_parser_close(gdf_ipc_parser_type *handle) {
delete cffi_unwrap(handle);
}
int gdf_ipc_parser_failed(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->is_failed();
}
const char *gdf_ipc_parser_get_schema_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_schema_json().c_str();
}
const char* gdf_ipc_parser_get_layout_json(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_layout_json().c_str();
}
const char* gdf_ipc_parser_get_error(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_error().c_str();
}
const void* gdf_ipc_parser_get_data(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data();
}
int64_t gdf_ipc_parser_get_data_offset(gdf_ipc_parser_type *handle) {
return cffi_unwrap(handle)->get_data_offset();
}
void gdf_ipc_parser_open_recordbatches(gdf_ipc_parser_type *handle,
const uint8_t *recordbatches,
size_t length)
{
return cffi_unwrap(handle)->open_recordbatches(recordbatches, length);
}
|
48cba0de3e0652f7af6dad920cfb66bfef805b84.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
if (id % 2) vector[id] = id;
else vector[id] = vectorsize * vectorsize;
}
#define BLOCKSIZE 10
#define N BLOCKSIZE
int main(int nn, char *str[]) {
unsigned *vector, *hvector;
hipMalloc(&vector, N * sizeof(unsigned));
hvector = (unsigned *)malloc(N * sizeof(unsigned));
unsigned nblocks = ceil((float)N / BLOCKSIZE);
hipLaunchKernelGGL(( dkernel), dim3(nblocks), dim3(BLOCKSIZE), 0, 0, vector, N);
hipMemcpy(hvector, vector, N * sizeof(unsigned), hipMemcpyDeviceToHost);
for (unsigned ii = 0; ii < N; ++ii) {
printf("%4d ", hvector[ii]);
}
printf("\n");
return 0;
}
|
48cba0de3e0652f7af6dad920cfb66bfef805b84.cu
|
#include <stdio.h>
#include <cuda.h>
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
if (id % 2) vector[id] = id;
else vector[id] = vectorsize * vectorsize;
}
#define BLOCKSIZE 10
#define N BLOCKSIZE
int main(int nn, char *str[]) {
unsigned *vector, *hvector;
cudaMalloc(&vector, N * sizeof(unsigned));
hvector = (unsigned *)malloc(N * sizeof(unsigned));
unsigned nblocks = ceil((float)N / BLOCKSIZE);
dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
for (unsigned ii = 0; ii < N; ++ii) {
printf("%4d ", hvector[ii]);
}
printf("\n");
return 0;
}
|
4ceedd43ad3b48f99842235ffcc4b31f18ff559e.hip
|
// !!! This is a file automatically generated by hipify!!!
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/OpMathType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/hip/ScanKernels.h>
#include <ATen/native/hip/ScanUtils.cuh>
#include <cmath>
#include <limits>
namespace at::native {
// custom min and max to be used in logcumsumexp for complex arguments
template <typename scalar_t, bool min>
__host__ __device__ c10::complex<scalar_t> _logcumsumexp_minmax(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
scalar_t xr = std::real(x);
scalar_t yr = std::real(y);
if (::isnan(yr) || (::isnan(std::imag(y)))) {
return y;
} else if (::isnan(xr) || (::isnan(std::imag(x)))) {
return x;
} else if (min) { // min
return (xr < yr) ? x : y;
} else { // max
return (xr >= yr) ? x : y;
}
}
template <typename scalar_t>
__host__ __device__ scalar_t _log_add_exp_helper(const scalar_t& x, const scalar_t& y) {
// Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
// Using the original expression: `at::_isnan(y) ? y : ::min(x, y)` causes an error in ROCM
auto isnan_x = at::_isnan(x);
auto isnan_y = at::_isnan(y);
scalar_t min = isnan_y ? y : (isnan_x ? x : ::min(x, y));
scalar_t max = isnan_y ? y : (isnan_x ? x : ::max(x, y));
if (min != max || ::isfinite(min)) {
// nan will be propagated here
return ::log1p(::exp(min - max)) + max;
} else {
// special case to correctly handle infinite cases
return x;
}
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _fast_build_exp(const c10::complex<scalar_t>& x) {
// complex exponential function, but implemented manually to get fast compilation time
// this function only handles the case where the x is finite (not inf nor nan)
auto xreal = std::real(x);
auto ximag = std::imag(x);
auto exp_x_abs = ::exp(xreal);
auto exp_x_real = exp_x_abs * std::cos(ximag);
auto exp_x_imag = exp_x_abs * std::sin(ximag);
return {exp_x_real, exp_x_imag};
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _fast_build_exp_inf(const c10::complex<scalar_t>& x) {
// complex exponential function, but implemented manually to get fast compilation time
// this function only handles the case where the real part of x is infinite
auto ximag = std::imag(x);
auto exp_x_abs = std::numeric_limits<scalar_t>::infinity();
auto sin = std::sin(ximag);
auto cos = std::cos(ximag);
// special case if the angle is exactly the multiple of pi/2
auto exp_x_real = (cos == 0) ? (scalar_t)0.0 : exp_x_abs * cos;
auto exp_x_imag = (sin == 0) ? (scalar_t)0.0 : exp_x_abs * sin;
return {exp_x_real, exp_x_imag};
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _log_add_exp_helper(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
c10::complex<scalar_t> min = _logcumsumexp_minmax<scalar_t, /*min=*/true>(x, y);
c10::complex<scalar_t> max = _logcumsumexp_minmax<scalar_t, /*min=*/false>(x, y);
scalar_t min_real = std::real(min);
scalar_t max_real = std::real(max);
if (::isnan(min_real) || ::isnan(std::imag(min))) {
// handling the "infectious" NaNs
return {std::numeric_limits<scalar_t>::quiet_NaN(), std::numeric_limits<scalar_t>::quiet_NaN()};
}
else if ((!::isfinite(min_real)) && (min_real == max_real)) {
if (min_real < 0) {
// handle the -inf case, the imaginary part here does not really matter as the exp(value)
// will be around 0.0 and the angle (i.e. the imaginary part) cannot be determined.
// It does not matter if we're taking the exp of this value
return min;
} else {
// handle the +inf case, we don't need the special precision for log1p for small values
// and to avoid producing nan in case of real(max) == real(min) == +inf
auto exp_min = _fast_build_exp_inf(min);
auto exp_max = _fast_build_exp_inf(max);
return ::log1p(exp_min + exp_max - 1); // log1p(x - 1) builds faster than log
}
} else {
auto minmax = min - max;
auto exp_minmax = _fast_build_exp(minmax);
return ::log1p(exp_minmax) + max;
}
}
void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
// Compile time for CUDA-11.4 is 3x slower than with CUDA-11.6+, specifically for complex numbers
#if defined(FBCODE_CAFFE2)
#define _LCME_DISPATCH AT_DISPATCH_FLOATING_TYPES_AND2
#else
#define _LCME_DISPATCH AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2
#endif
_LCME_DISPATCH(ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x_, const scalar_t y_) -> scalar_t {
const opmath_t x{x_}, y{y_};
return _log_add_exp_helper(x, y);
};
scan_dim<scalar_t>(self, result, dim, init, log_add_exp);
});
}
} // namespace at::native
|
4ceedd43ad3b48f99842235ffcc4b31f18ff559e.cu
|
#define TORCH_ASSERT_NO_OPERATORS
#include <ATen/core/TensorBase.h>
#include <ATen/OpMathType.h>
#include <ATen/Dispatch.h>
#include <ATen/native/cuda/ScanKernels.h>
#include <ATen/native/cuda/ScanUtils.cuh>
#include <cmath>
#include <limits>
namespace at::native {
// custom min and max to be used in logcumsumexp for complex arguments
template <typename scalar_t, bool min>
__host__ __device__ c10::complex<scalar_t> _logcumsumexp_minmax(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
scalar_t xr = std::real(x);
scalar_t yr = std::real(y);
if (::isnan(yr) || (::isnan(std::imag(y)))) {
return y;
} else if (::isnan(xr) || (::isnan(std::imag(x)))) {
return x;
} else if (min) { // min
return (xr < yr) ? x : y;
} else { // max
return (xr >= yr) ? x : y;
}
}
template <typename scalar_t>
__host__ __device__ scalar_t _log_add_exp_helper(const scalar_t& x, const scalar_t& y) {
// Reference : https://www.tensorflow.org/api_docs/python/tf/math/cumulative_logsumexp
// Using the original expression: `at::_isnan(y) ? y : std::min(x, y)` causes an error in ROCM
auto isnan_x = at::_isnan(x);
auto isnan_y = at::_isnan(y);
scalar_t min = isnan_y ? y : (isnan_x ? x : std::min(x, y));
scalar_t max = isnan_y ? y : (isnan_x ? x : std::max(x, y));
if (min != max || ::isfinite(min)) {
// nan will be propagated here
return ::log1p(std::exp(min - max)) + max;
} else {
// special case to correctly handle infinite cases
return x;
}
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _fast_build_exp(const c10::complex<scalar_t>& x) {
// complex exponential function, but implemented manually to get fast compilation time
// this function only handles the case where the x is finite (not inf nor nan)
auto xreal = std::real(x);
auto ximag = std::imag(x);
auto exp_x_abs = std::exp(xreal);
auto exp_x_real = exp_x_abs * std::cos(ximag);
auto exp_x_imag = exp_x_abs * std::sin(ximag);
return {exp_x_real, exp_x_imag};
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _fast_build_exp_inf(const c10::complex<scalar_t>& x) {
// complex exponential function, but implemented manually to get fast compilation time
// this function only handles the case where the real part of x is infinite
auto ximag = std::imag(x);
auto exp_x_abs = std::numeric_limits<scalar_t>::infinity();
auto sin = std::sin(ximag);
auto cos = std::cos(ximag);
// special case if the angle is exactly the multiple of pi/2
auto exp_x_real = (cos == 0) ? (scalar_t)0.0 : exp_x_abs * cos;
auto exp_x_imag = (sin == 0) ? (scalar_t)0.0 : exp_x_abs * sin;
return {exp_x_real, exp_x_imag};
}
template <typename scalar_t>
__host__ __device__ c10::complex<scalar_t> _log_add_exp_helper(const c10::complex<scalar_t>& x, const c10::complex<scalar_t>& y) {
c10::complex<scalar_t> min = _logcumsumexp_minmax<scalar_t, /*min=*/true>(x, y);
c10::complex<scalar_t> max = _logcumsumexp_minmax<scalar_t, /*min=*/false>(x, y);
scalar_t min_real = std::real(min);
scalar_t max_real = std::real(max);
if (::isnan(min_real) || ::isnan(std::imag(min))) {
// handling the "infectious" NaNs
return {std::numeric_limits<scalar_t>::quiet_NaN(), std::numeric_limits<scalar_t>::quiet_NaN()};
}
else if ((!::isfinite(min_real)) && (min_real == max_real)) {
if (min_real < 0) {
// handle the -inf case, the imaginary part here does not really matter as the exp(value)
// will be around 0.0 and the angle (i.e. the imaginary part) cannot be determined.
// It does not matter if we're taking the exp of this value
return min;
} else {
// handle the +inf case, we don't need the special precision for log1p for small values
// and to avoid producing nan in case of real(max) == real(min) == +inf
auto exp_min = _fast_build_exp_inf(min);
auto exp_max = _fast_build_exp_inf(max);
return ::log1p(exp_min + exp_max - 1); // log1p(x - 1) builds faster than log
}
} else {
auto minmax = min - max;
auto exp_minmax = _fast_build_exp(minmax);
return ::log1p(exp_minmax) + max;
}
}
void launch_logcumsumexp_cuda_kernel(const TensorBase& result, const TensorBase& self, int64_t dim) {
// Compile time for CUDA-11.4 is 3x slower than with CUDA-11.6+, specifically for complex numbers
#if defined(FBCODE_CAFFE2)
#define _LCME_DISPATCH AT_DISPATCH_FLOATING_TYPES_AND2
#else
#define _LCME_DISPATCH AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2
#endif
_LCME_DISPATCH(ScalarType::Half, ScalarType::BFloat16,
self.scalar_type(), "logcumsumexp_cuda",
[&]() {
using opmath_t = at::opmath_type<scalar_t>;
scalar_t init = -std::numeric_limits<scalar_t>::infinity();
auto log_add_exp = [] C10_HOST_DEVICE (const scalar_t x_, const scalar_t y_) -> scalar_t {
const opmath_t x{x_}, y{y_};
return _log_add_exp_helper(x, y);
};
scan_dim<scalar_t>(self, result, dim, init, log_add_exp);
});
}
} // namespace at::native
|
61aca7b84ab61e6d3f477aaa1d8d9bfbedb1d800.hip
|
// !!! This is a file automatically generated by hipify!!!
/** \file "vanleertheta.cu" : implements the kernel for the "VanLeerTheta" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <hip/hip_runtime.h>
// BLOCK_X : in azimuth
//#define BLOCK_X 16
//#define BLOCK_X DEF_BLOCK_X_VANLEERTHETA
#define BLOCK_X 32
// BLOCK_Y : in radius
#define BLOCK_Y 8
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
#define invsurf CRadiiStuff[(nr+1)*7+ig]
#define invrmed CRadiiStuff[(nr+1)*2 + ig]
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
extern PolarGrid *RhoStar, *QRStar, *Work;
__global__ void kernel_vlth (double *rhos,
double *qrs,
double *vt,
double *qb,
int ns,
int nr,
int pitch,
double dt) {
__shared__ double srhos[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double svt[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double sqrs[(BLOCK_X+2)*(BLOCK_Y+2)];
//double dr, invsurf, fluxp, fluxm;
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
// js & is, l like 'local' (shared memory <=> local patch)
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int ids = is*(blockDim.x+2)+js;
int idg = __mul24(ig, pitch) + jg;
int jgp = jg+1;
if (jgp == ns) jgp = 0;
// We perform a coalesced read of 'rhos' into the shared memory;
srhos[ids] = rhos[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
srhos[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (rhos, jgp, ig, pitch);
// We perform a coalesced read of 'qrs' into the shared memory;
sqrs[ids] = qrs[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
sqrs[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (qrs, jgp, ig, pitch);
// We perform a coalesced read of 'vt' into the shared memory;
svt[ids] = vt[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
svt[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (vt, jgp, ig, pitch);
__syncthreads ();
const double dr = CRadiiStuff[(nr+1)*8+ig] - CRadiiStuff[(nr+1)*4+ig];
// invsurf = CRadiiStuff[(nr+1)*7+ig];
const double fluxm = sqrs[ids] * srhos[ids] * svt[ids];
const double fluxp = sqrs[ids+1] * srhos[ids+1] * svt[ids+1];
// qb[idg] += __dadd_rn(fluxm,-fluxp)*invsurf*dr*dt;
qb[idg] += (fluxm - fluxp)*invsurf*dr*dt;
}
extern "C"
void VanLeerTheta_gpu_cu (PolarGrid *Vtheta, PolarGrid *Qbase, double dt)
{
int nr, ns;
nr = Vtheta->Nrad;
ns = Vtheta->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_vlth) , dim3(grid), dim3(block) , 0, 0, RhoStar->gpu_field,
QRStar->gpu_field,
Vtheta->gpu_field,
Qbase->gpu_field,
Vtheta->Nsec,
Vtheta->Nrad,
Vtheta->pitch/sizeof(double),
dt);
hipDeviceSynchronize();
hipError_t err = hipGetLastError ();
if ( hipSuccess != err) {
fprintf (stderr, "Cuda error kernel vlth failed \t%s\n", hipGetErrorString (err));
exit (-1);
}
}
__global__ void kernel_vlthds (double *rhos,
double *qrs,
double *vt,
double *qb,
double invdphi,
int ns,
int nr,
int pitch,
double dt) {
__shared__ double srhos[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double svt[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double sqrs[(BLOCK_X+2)*(BLOCK_Y+2)];
//double dr, invsurf, fluxp, fluxm;
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
// js & is, l like 'local' (shared memory <=> local patch)
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int ids = is*(blockDim.x+2)+js;
int idg = __mul24(ig, pitch) + jg;
int jgp = jg+1;
if (jgp == ns) jgp = 0;
// We perform a coalesced read of 'rhos' into the shared memory;
srhos[ids] = rhos[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
srhos[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (rhos, jgp, ig, pitch);
// We perform a coalesced read of 'qrs' into the shared memory;
sqrs[ids] = qrs[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
sqrs[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (qrs, jgp, ig, pitch);
// We perform a coalesced read of 'vt' into the shared memory;
svt[ids] = vt[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
svt[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (vt, jgp, ig, pitch);
__syncthreads ();
const double dr = CRadiiStuff[(nr+1)*8+ig] - CRadiiStuff[(nr+1)*4+ig];
// invsurf = CRadiiStuff[(nr+1)*7+ig];
//const double fluxm = sqrs[ids] * svt[ids] * srhos[ids];
//const double fluxp = sqrs[ids+1] * svt[ids+1] * srhos[ids+1];
const double fluxm = sqrs[ids] * svt[ids];
const double fluxp = sqrs[ids+1] * svt[ids+1];
//qb[idg] += ((fluxm - fluxp) - srhos[ids]*sqrs[ids]*(svt[ids] - svt[ids+1])) * invsurf * dr * dt;
qb[idg] += ((fluxm - fluxp) - sqrs[ids]*(svt[ids] - svt[ids+1])) * invsurf * dr * dt;
}
extern "C"
void VanLeerThetaDustSize_gpu_cu (PolarGrid *Vtheta, PolarGrid *Qbase, double dt)
{
int nr, ns;
nr = Vtheta->Nrad;
ns = Vtheta->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(hipMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( kernel_vlthds) , dim3(grid), dim3(block) , 0, 0, RhoStar->gpu_field,
QRStar->gpu_field,
Vtheta->gpu_field,
Qbase->gpu_field,
((double) ns)/2.0/M_PI,
Vtheta->Nsec,
Vtheta->Nrad,
Vtheta->pitch/sizeof(double),
dt);
hipDeviceSynchronize();
getLastCudaError ("kernel_vlthds failed");
}
|
61aca7b84ab61e6d3f477aaa1d8d9bfbedb1d800.cu
|
/** \file "vanleertheta.cu" : implements the kernel for the "VanLeerTheta" procedure
*/
#define __CUDA 1
#include "fargo.h"
#undef __CUDA
#include <stdarg.h>
#include <helper_cuda.h>
#include <cuda.h>
// BLOCK_X : in azimuth
//#define BLOCK_X 16
//#define BLOCK_X DEF_BLOCK_X_VANLEERTHETA
#define BLOCK_X 32
// BLOCK_Y : in radius
#define BLOCK_Y 8
#define GET_TAB(u,x,y,pitch) *(u + __mul24(y, pitch) + x)
#define invsurf CRadiiStuff[(nr+1)*7+ig]
#define invrmed CRadiiStuff[(nr+1)*2 + ig]
//__constant__ double CRadiiStuff[8192];
__device__ double CRadiiStuff[32768];
extern PolarGrid *RhoStar, *QRStar, *Work;
__global__ void kernel_vlth (double *rhos,
double *qrs,
double *vt,
double *qb,
int ns,
int nr,
int pitch,
double dt) {
__shared__ double srhos[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double svt[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double sqrs[(BLOCK_X+2)*(BLOCK_Y+2)];
//double dr, invsurf, fluxp, fluxm;
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
// js & is, l like 'local' (shared memory <=> local patch)
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int ids = is*(blockDim.x+2)+js;
int idg = __mul24(ig, pitch) + jg;
int jgp = jg+1;
if (jgp == ns) jgp = 0;
// We perform a coalesced read of 'rhos' into the shared memory;
srhos[ids] = rhos[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
srhos[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (rhos, jgp, ig, pitch);
// We perform a coalesced read of 'qrs' into the shared memory;
sqrs[ids] = qrs[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
sqrs[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (qrs, jgp, ig, pitch);
// We perform a coalesced read of 'vt' into the shared memory;
svt[ids] = vt[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
svt[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (vt, jgp, ig, pitch);
__syncthreads ();
const double dr = CRadiiStuff[(nr+1)*8+ig] - CRadiiStuff[(nr+1)*4+ig];
// invsurf = CRadiiStuff[(nr+1)*7+ig];
const double fluxm = sqrs[ids] * srhos[ids] * svt[ids];
const double fluxp = sqrs[ids+1] * srhos[ids+1] * svt[ids+1];
// qb[idg] += __dadd_rn(fluxm,-fluxp)*invsurf*dr*dt;
qb[idg] += (fluxm - fluxp)*invsurf*dr*dt;
}
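// In kernel_vlth above the update is qb += (fluxm - fluxp) * invsurf * dr * dt:
// the difference of the interface fluxes qrs * rhos * vt taken at azimuthal index
// jg and at jg+1 (wrapped to 0 at ns), scaled by the inverse cell surface, the radial
// extent dr and the time step, i.e. the azimuthal part of the van Leer transport step.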
extern "C"
void VanLeerTheta_gpu_cu (PolarGrid *Vtheta, PolarGrid *Qbase, double dt)
{
int nr, ns;
nr = Vtheta->Nrad;
ns = Vtheta->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, cudaMemcpyHostToDevice));
kernel_vlth <<< grid, block >>> (RhoStar->gpu_field,
QRStar->gpu_field,
Vtheta->gpu_field,
Qbase->gpu_field,
Vtheta->Nsec,
Vtheta->Nrad,
Vtheta->pitch/sizeof(double),
dt);
cudaDeviceSynchronize();
cudaError_t err = cudaGetLastError ();
if ( cudaSuccess != err) {
fprintf (stderr, "Cuda error kernel vlth failed \t%s\n", cudaGetErrorString (err));
exit (-1);
}
}
__global__ void kernel_vlthds (double *rhos,
double *qrs,
double *vt,
double *qb,
double invdphi,
int ns,
int nr,
int pitch,
double dt) {
__shared__ double srhos[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double svt[(BLOCK_X+2)*(BLOCK_Y+2)];
__shared__ double sqrs[(BLOCK_X+2)*(BLOCK_Y+2)];
//double dr, invsurf, fluxp, fluxm;
int jg = threadIdx.x + blockIdx.x * blockDim.x;
int ig = threadIdx.y + blockIdx.y * blockDim.y;
// js & is are the local indices into the shared-memory patch (jg & ig are the global ones)
int js = threadIdx.x + 1;
int is = threadIdx.y + 1;
int ids = is*(blockDim.x+2)+js;
int idg = __mul24(ig, pitch) + jg;
int jgp = jg+1;
if (jgp == ns) jgp = 0;
// We perform a coalesced read of 'rhos' into the shared memory;
srhos[ids] = rhos[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
srhos[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (rhos, jgp, ig, pitch);
// We perform a coalesced read of 'qrs' into the shared memory;
sqrs[ids] = qrs[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
sqrs[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (qrs, jgp, ig, pitch);
// We perform a coalesced read of 'vt' into the shared memory;
svt[ids] = vt[idg];
// EDGE 2: "RIGHT EDGE".
if (js == blockDim.x)
svt[is*(blockDim.x+2)+blockDim.x+1] = GET_TAB (vt, jgp, ig, pitch);
__syncthreads ();
const double dr = CRadiiStuff[(nr+1)*8+ig] - CRadiiStuff[(nr+1)*4+ig];
// invsurf = CRadiiStuff[(nr+1)*7+ig];
//const double fluxm = sqrs[ids] * svt[ids] * srhos[ids];
//const double fluxp = sqrs[ids+1] * svt[ids+1] * srhos[ids+1];
const double fluxm = sqrs[ids] * svt[ids];
const double fluxp = sqrs[ids+1] * svt[ids+1];
//qb[idg] += ((fluxm - fluxp) - srhos[ids]*sqrs[ids]*(svt[ids] - svt[ids+1])) * invsurf * dr * dt;
qb[idg] += ((fluxm - fluxp) - sqrs[ids]*(svt[ids] - svt[ids+1])) * invsurf * dr * dt;
}
extern "C"
void VanLeerThetaDustSize_gpu_cu (PolarGrid *Vtheta, PolarGrid *Qbase, double dt)
{
int nr, ns;
nr = Vtheta->Nrad;
ns = Vtheta->Nsec;
dim3 block (BLOCK_X, BLOCK_Y);
dim3 grid ((ns+block.x-1)/block.x, (nr+block.y-1)/block.y);
checkCudaErrors(cudaMemcpyToSymbol(CRadiiStuff, (void *)RadiiStuff, (size_t)(14*(nr+1))*sizeof(double), 0, cudaMemcpyHostToDevice));
kernel_vlthds <<< grid, block >>> (RhoStar->gpu_field,
QRStar->gpu_field,
Vtheta->gpu_field,
Qbase->gpu_field,
((double) ns)/2.0/M_PI,
Vtheta->Nsec,
Vtheta->Nrad,
Vtheta->pitch/sizeof(double),
dt);
cudaDeviceSynchronize();
getLastCudaError ("kernel_vlthds failed");
}
|
767b7f1067ace2784f3e08eaee4e0c60bb48faee.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include <hip/hip_cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template<int OUTER_HIST_BITS_COUNT,
int INNER_HIST_BITS_COUNT,
int BLOCK_SIZE>
struct TPointHist {
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
static_assert(OUTER_HIST_BITS_COUNT > 0 && INNER_HIST_BITS_COUNT > 0, "This histogram is specialized for 255 bin count");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
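// Worked example for the sizing above (assuming the (OUTER=2, INNER=1) instantiation
// used for the >128-bin case and BLOCK_SIZE = 384): 1024 << 2 = 4096 floats per warp
// histogram, so maxBlocks = 384 * 32 / 4096 = 3 and the 12 warps of the block share
// 3 such histograms (3 * 4096 = 32 * BLOCK_SIZE = HIST_SIZE). innerHistStart is then
// threadIdx.x & 16, i.e. each warp is further split into two interleaved sub-histograms.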
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
atomicAdd(dst, val);
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
if (bin != mostRecentBin[i]) {
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
}
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
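// Concretely, after Reduce() the partial sum for feature f (0..3), fold 'fold' and
// stat slot w (0 or 1) sits at Buffer[2 * (maxFoldCount * f + fold) + w], which is
// exactly the indexing ComputeSplitPropertiesPass uses to read the result back.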
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
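// Here innerHistStart = threadIdx.x & 24, so each warp keeps four sub-histograms at
// offsets 0, 8, 16 and 24 within its 1024-float slice, one per group of 8 consecutive lanes.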
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 32 ? 1.0f : 0.0f;
syncTile.sync();
int offset = f + 32 * (bin & 31);
const int offset1 = offset + flag;
const float add1 = pass * stat1;
Buffer[offset1] += add1;
const int offset2 = offset + !flag;
const float add2 = pass * stat2;
syncTile.sync();
Buffer[offset2] += add2;
}
}
__forceinline__ __device__ void AddPoint2(uint2 ci,
const float2 t,
const float2 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const int bin1 = (ci.x >> (24 - (f << 2))) & 255;
const int bin2 = (ci.y >> (24 - (f << 2))) & 255;
const float passx = bin1 != 32 ? 1.0f : 0.0f;
const float passy = bin2 != 32 ? 1.0f : 0.0f;
int offsetx = f + 32 * (bin1 & 31) + flag;
int offsety = f + 32 * (bin2 & 31) + flag;
syncTile.sync();
Buffer[offsetx] += passx * stat1.x;
Buffer[offsety] += passy * stat1.y;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
syncTile.sync();
Buffer[offsetx] += passx * stat2.x;
Buffer[offsety] += passy * stat2.y;
}
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 32 ? 1.0f : 0.0f;
const float passy = biny != 32 ? 1.0f : 0.0f;
const float passz = binz != 32 ? 1.0f : 0.0f;
const float passw = binw != 32 ? 1.0f : 0.0f;
float* buffer = Buffer + f;
int offsetx = (binx & 31) << 5;
int offsety = (biny & 31) << 5;
int offsetz = (binz & 31) << 5;
int offsetw = (binw & 31) << 5;
syncTile.sync();
buffer[offsetx] += passx * stat1.x;
buffer[offsety] += passy * stat1.y;
buffer[offsetz] += passz * stat1.z;
buffer[offsetw] += passw * stat1.w;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
syncTile.sync();
buffer[offsetx] += passx * stat2.x;
buffer[offsety] += passy * stat2.y;
buffer[offsetz] += passz * stat2.z;
buffer[offsetw] += passw * stat2.w;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[2 * f + (inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 1, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 64;
int offset = 2 * f;
offset += 16 * (bin & 62) + 8 * (bin & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float val1 = pass * stat1;
offset += flag;
syncTile.sync();
if (writeFirstFlag) {
Buffer[offset] += val1;
}
syncTile.sync();
if (!writeFirstFlag) {
Buffer[offset] += val1;
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
syncTile.sync();
if (writeFirstFlag) {
Buffer[offset] += val2;
}
syncTile.sync();
if (!writeFirstFlag) {
Buffer[offset] += val2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 64;
const float passy = biny != 64;
const float passz = binz != 64;
const float passw = binw != 64;
float* buffer = Buffer + f;
syncTile.sync();
int offsetx = 16 * (binx & 62) + 8 * (binx & 1);
int offsety = 16 * (biny & 62) + 8 * (biny & 1);
int offsetz = 16 * (binz & 62) + 8 * (binz & 1);
int offsetw = 16 * (binw & 62) + 8 * (binw & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float valx = passx * stat1.x;
const float valy = passy * stat1.y;
const float valz = passz * stat1.z;
const float valw = passw * stat1.w;
if (writeFirstFlag) {
buffer[offsetx] += valx;
buffer[offsety] += valy;
buffer[offsetz] += valz;
buffer[offsetw] += valw;
}
syncTile.sync();
if (!writeFirstFlag) {
buffer[offsetx] += valx;
buffer[offsety] += valy;
buffer[offsetz] += valz;
buffer[offsetw] += valw;
}
const float val2x = passx * stat2.x;
const float val2y = passy * stat2.y;
const float val2z = passz * stat2.z;
const float val2w = passw * stat2.w;
syncTile.sync();
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
if (writeFirstFlag) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
syncTile.sync();
if (!writeFirstFlag) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[2 * f + (inWarpHist << 4)];
sum1 += src[2 * f + (inWarpHist << 4) + 512];
}
Buffer[2 * (maxFoldCount * f + fold0) + w] = sum0;
Buffer[2 * (maxFoldCount * f + fold0 + 32) + w] = sum1;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 128;
int offset = f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
__syncwarp();
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
__syncwarp();
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem) {
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = BLOCK_SIZE;
const int histBlockCount = 1;
if (USE_64_BIT_LOAD) {
#if __CUDA_ARCH__ < 300
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ < 700
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#else
const int OUTER_UNROLL = 1;//(INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
#if __CUDA_ARCH__ >= 700
ComputeHistogram4 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
#else
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
#endif
} else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 2;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
const int fid = (threadIdx.x / 64);
const int w = threadIdx.x & 1;
const int featureFolds = fid < fCount ? feature[fid].Folds : 0;
const int featureOffset = fid * maxFoldCount * 2 + w;
for (int fold = (threadIdx.x / 2) & 31; fold < featureFolds; fold += 32) {
if (fid < fCount) {
const float val = smem[featureOffset + 2 * fold];
if (abs(val) > 1e-20f) {
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
}
}
}
}
}
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation; for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, use64BitLoad);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, use64BitLoad);
} else {
DECLARE_PASS(2, 1, M, use64BitLoad);
}
}
}
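// The maxBinCount dispatch above in terms of capacity: TPointHist<O, I, BLOCK_SIZE>
// holds 1 << (5 + I + O) folds per feature, so (0,0) covers up to 32 bins, (0,1) up
// to 64, (0,2) up to 128, and the fallback (2,1) covers the full 255-bin case.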
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
|
767b7f1067ace2784f3e08eaee4e0c60bb48faee.cu
|
#include "pointwise_hist2.cuh"
#include "split_properties_helpers.cuh"
#include "compute_point_hist2_loop.cuh"
#include <cooperative_groups.h>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
using namespace cooperative_groups;
namespace NKernel
{
template<int OUTER_HIST_BITS_COUNT,
int INNER_HIST_BITS_COUNT,
int BLOCK_SIZE>
struct TPointHist {
float* __restrict__ Buffer;
float mostRecentStat1[4];
float mostRecentStat2[4];
uchar mostRecentBin[4];
__forceinline__ __device__ int SliceOffset() {
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
static_assert(OUTER_HIST_BITS_COUNT <= 2, "Error: assume 12 warps, so limited by 128-bin histogram per warp");
static_assert(OUTER_HIST_BITS_COUNT > 0 && INNER_HIST_BITS_COUNT > 0, "This histogram is specialized for 255 bin count");
const int warpId = (threadIdx.x / 32) % maxBlocks;
const int warpOffset = (1024 << OUTER_HIST_BITS_COUNT) * warpId;
const int blocks = 4 >> INNER_HIST_BITS_COUNT;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << (INNER_HIST_BITS_COUNT + 3)));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
#pragma unroll
for (int f = 0; f < 4; ++f) {
mostRecentBin[f] = 0;
mostRecentStat1[f] = 0;
mostRecentStat2[f] = 0;
}
}
__forceinline__ __device__ void Add(float val, float* dst) {
atomicAdd(dst, val);
}
__forceinline__ __device__ void AddPoint(ui32 ci, const float t, const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
if (bin != mostRecentBin[i]) {
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
offset += flag;
Add(mostRecentStat1[i], Buffer + offset);
offset = flag ? offset - 1 : offset + 1;
Add(mostRecentStat2[i], Buffer + offset);
}
mostRecentBin[i] = bin;
mostRecentStat1[i] = 0;
mostRecentStat2[i] = 0;
}
{
mostRecentStat1[i] += stat1;
mostRecentStat2[i] += stat2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
{
const bool flag = threadIdx.x & 1;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const bool pass = (mostRecentBin[i] >> (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT)) == 0;
if (pass) {
int offset = 2 * f;
const uchar mask = (1 << INNER_HIST_BITS_COUNT) - 1;
offset += 8 * (mostRecentBin[i] & mask);
offset += 32 * ((mostRecentBin[i] >> INNER_HIST_BITS_COUNT));
Add(mostRecentStat1[i], Buffer + offset + flag);
Add(mostRecentStat2[i], Buffer + offset + !flag);
}
}
}
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024 << OUTER_HIST_BITS_COUNT;
const int maxBlocks = BLOCK_SIZE * 32 / (1024 << OUTER_HIST_BITS_COUNT);
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll maxBlocks
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
float sum[4];
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
for (int fold = (threadIdx.x >> 1); fold < maxFoldCount; fold += 128) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] = 0;
}
const int innerHistCount = 4 >> INNER_HIST_BITS_COUNT;
const int lowBitMask = (1 << INNER_HIST_BITS_COUNT) - 1;
const float* __restrict__ src = Buffer
+ (1024 << OUTER_HIST_BITS_COUNT) //warpHistSize
+ 8 * (fold & lowBitMask)
+ 32 * (fold >> INNER_HIST_BITS_COUNT)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
#pragma unroll
for (int f = 0; f < 4; ++f) {
sum[f] += src[2 * f + (inWarpHist << (3 + INNER_HIST_BITS_COUNT))];
}
}
#pragma unroll
for (int f = 0; f < 4; ++f) {
Buffer[2 * (maxFoldCount * f + fold) + w] = sum[f];
}
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 0, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 4;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 3));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void Add(float val, float* dst) {
dst[0] += val;
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 32 ? 1.0f : 0.0f;
syncTile.sync();
int offset = f + 32 * (bin & 31);
const int offset1 = offset + flag;
const float add1 = pass * stat1;
Buffer[offset1] += add1;
const int offset2 = offset + !flag;
const float add2 = pass * stat2;
syncTile.sync();
Buffer[offset2] += add2;
}
}
__forceinline__ __device__ void AddPoint2(uint2 ci,
const float2 t,
const float2 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float2 stat1 = flag ? t : w;
const float2 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const int bin1 = (ci.x >> (24 - (f << 2))) & 255;
const int bin2 = (ci.y >> (24 - (f << 2))) & 255;
const float passx = bin1 != 32 ? 1.0f : 0.0f;
const float passy = bin2 != 32 ? 1.0f : 0.0f;
int offsetx = f + 32 * (bin1 & 31) + flag;
int offsety = f + 32 * (bin2 & 31) + flag;
syncTile.sync();
Buffer[offsetx] += passx * stat1.x;
Buffer[offsety] += passy * stat1.y;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
syncTile.sync();
Buffer[offsetx] += passx * stat2.x;
Buffer[offsety] += passy * stat2.y;
}
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 32 ? 1.0f : 0.0f;
const float passy = biny != 32 ? 1.0f : 0.0f;
const float passz = binz != 32 ? 1.0f : 0.0f;
const float passw = binw != 32 ? 1.0f : 0.0f;
float* buffer = Buffer + f;
int offsetx = (binx & 31) << 5;
int offsety = (biny & 31) << 5;
int offsetz = (binz & 31) << 5;
int offsetw = (binw & 31) << 5;
syncTile.sync();
buffer[offsetx] += passx * stat1.x;
buffer[offsety] += passy * stat1.y;
buffer[offsetz] += passz * stat1.z;
buffer[offsetw] += passw * stat1.w;
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
syncTile.sync();
buffer[offsetx] += passx * stat2.x;
buffer[offsety] += passy * stat2.y;
buffer[offsetz] += passz * stat2.z;
buffer[offsetw] += passw * stat2.w;
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum = 0.0f;
const int fold = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 32;
if (fold < maxFoldCount) {
const int innerHistCount = 4;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 32 * fold
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum += src[2 * f + (inWarpHist << 3)];
}
Buffer[2 * (maxFoldCount * f + fold) + w] = sum;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 1, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
const int blocks = 2;
const int innerHistStart = (threadIdx.x & ((blocks - 1) << 4));
return warpOffset + innerHistStart;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const short f = ((i + threadIdx.x / 2) & 3);
const uchar bin = bfe(ci, 24 - (f << 3), 8);
const bool pass = bin != 64;
int offset = 2 * f;
offset += 16 * (bin & 62) + 8 * (bin & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float val1 = pass * stat1;
offset += flag;
syncTile.sync();
if (writeFirstFlag) {
Buffer[offset] += val1;
}
syncTile.sync();
if (!writeFirstFlag) {
Buffer[offset] += val1;
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
syncTile.sync();
if (writeFirstFlag) {
Buffer[offset] += val2;
}
syncTile.sync();
if (!writeFirstFlag) {
Buffer[offset] += val2;
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
thread_block_tile<32> syncTile = tiled_partition<32>(this_thread_block());
const bool flag = threadIdx.x & 1;
const float4 stat1 = flag ? t : w;
const float4 stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
int f = ((2 * i + threadIdx.x) & 6);
const ui32 shift = static_cast<ui32>(24 - (f << 2));
f += flag;
const int binx = (ci.x >> shift) & 255;
const int biny = (ci.y >> shift) & 255;
const int binz = (ci.z >> shift) & 255;
const int binw = (ci.w >> shift) & 255;
const float passx = binx != 64;
const float passy = biny != 64;
const float passz = binz != 64;
const float passw = binw != 64;
float* buffer = Buffer + f;
syncTile.sync();
int offsetx = 16 * (binx & 62) + 8 * (binx & 1);
int offsety = 16 * (biny & 62) + 8 * (biny & 1);
int offsetz = 16 * (binz & 62) + 8 * (binz & 1);
int offsetw = 16 * (binw & 62) + 8 * (binw & 1);
const bool writeFirstFlag = threadIdx.x & 8;
const float valx = passx * stat1.x;
const float valy = passy * stat1.y;
const float valz = passz * stat1.z;
const float valw = passw * stat1.w;
if (writeFirstFlag) {
buffer[offsetx] += valx;
buffer[offsety] += valy;
buffer[offsetz] += valz;
buffer[offsetw] += valw;
}
syncTile.sync();
if (!writeFirstFlag) {
buffer[offsetx] += valx;
buffer[offsety] += valy;
buffer[offsetz] += valz;
buffer[offsetw] += valw;
}
const float val2x = passx * stat2.x;
const float val2y = passy * stat2.y;
const float val2z = passz * stat2.z;
const float val2w = passw * stat2.w;
syncTile.sync();
offsetx += flag ? -1 : 1;
offsety += flag ? -1 : 1;
offsetz += flag ? -1 : 1;
offsetw += flag ? -1 : 1;
if (writeFirstFlag) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
syncTile.sync();
if (!writeFirstFlag) {
buffer[offsetx] += val2x;
buffer[offsety] += val2y;
buffer[offsetz] += val2z;
buffer[offsetw] += val2w;
}
}
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
float sum0 = 0.0f;
float sum1 = 0.0f;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 64;
{
const int innerHistCount = 2;
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 8 * (fold0 & 1)
+ 32 * (fold0 >> 1)
+ w;
#pragma unroll
for (int inWarpHist = 0; inWarpHist < innerHistCount; ++inWarpHist) {
sum0 += src[2 * f + (inWarpHist << 4)];
sum1 += src[2 * f + (inWarpHist << 4) + 512];
}
Buffer[2 * (maxFoldCount * f + fold0) + w] = sum0;
Buffer[2 * (maxFoldCount * f + fold0 + 32) + w] = sum1;
}
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
struct TPointHist<0, 2, BLOCK_SIZE> {
float* __restrict__ Buffer;
__forceinline__ __device__ int SliceOffset() {
const int warpId = (threadIdx.x / 32);
const int warpOffset = 1024 * warpId;
return warpOffset;
}
__forceinline__ __device__ TPointHist(float* buff) {
const int HIST_SIZE = 32 * BLOCK_SIZE;
#pragma unroll 8
for (int i = threadIdx.x; i < HIST_SIZE; i += BLOCK_SIZE) {
buff[i] = 0;
}
Buffer = buff + SliceOffset();
__syncthreads();
}
__forceinline__ __device__ void AddPoint(ui32 ci,
const float t,
const float w) {
const bool flag = threadIdx.x & 1;
const float stat1 = flag ? t : w;
const float stat2 = flag ? w : t;
#pragma unroll
for (int i = 0; i < 4; i++) {
const int f = ((2 * i + threadIdx.x) & 6);
const int bin = (ci >> (24 - (f << 2))) & 255;
const float pass = bin != 128;
int offset = f;
offset += 8 * (bin & 127);
//
const int writeTime = (threadIdx.x >> 3) & 3;
const float val1 = pass * stat1;
offset += flag;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val1;
}
__syncwarp();
}
const float val2 = pass * stat2;
offset = flag ? offset - 1 : offset + 1;
#pragma unroll
for (int k = 0; k < 4; ++k) {
if (k == writeTime) {
Buffer[offset] += val2;
}
__syncwarp();
}
}
}
__forceinline__ __device__ void AddPoint2(uint2 bin, const float2 t, const float2 w) {
AddPoint(bin.x, t.x, w.x);
AddPoint(bin.y, t.y, w.y);
}
__forceinline__ __device__ void AddPoint4(uint4 ci, const float4 t, const float4 w) {
AddPoint(ci.x, t.x, w.x);
AddPoint(ci.y, t.y, w.y);
AddPoint(ci.z, t.z, w.z);
AddPoint(ci.w, t.w, w.w);
}
//After reduce we store histograms by blocks: 256 floats (4 x 2 x 32)
// for the first 32 bins, then 256 floats for the second 32 bins, etc.
__forceinline__ __device__ void Reduce() {
Buffer -= SliceOffset();
__syncthreads();
{
const int warpHistSize = 1024;
for (int start = threadIdx.x; start < warpHistSize; start += BLOCK_SIZE) {
float sum = 0;
// 12 iterations at 32-bin
#pragma unroll 12
for (int i = start; i < 32 * BLOCK_SIZE; i += warpHistSize) {
sum += Buffer[i];
}
Buffer[warpHistSize + start] = sum;
}
}
__syncthreads();
if (threadIdx.x < 256) {
const int w = threadIdx.x & 1;
const int f = threadIdx.x / 64;
const int fold0 = (threadIdx.x >> 1) & 31;
const int maxFoldCount = 128;
{
const volatile float* __restrict__ src = Buffer
+ 1024 //warpHistSize
+ 2 * f
+ w;
#pragma unroll
for (int k = 0; k < 4; ++k) {
int fold = fold0 + 32 * k;
Buffer[2 * (maxFoldCount * f + fold) + w] = src[8 * fold];
}
}
}
__syncthreads();
}
};
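// Note on the <0, 2> specialization above: with 128 folds there is a single histogram
// slice per warp, so AddPoint serializes its shared-memory updates. Each warp is split
// into four groups of 8 lanes (writeTime = (threadIdx.x >> 3) & 3) that take turns
// writing, separated by __syncwarp(), so concurrent lanes do not collide on the same float.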
template<int BLOCK_SIZE, int OUTER_HIST_BITS_COUNT, int INNER_HIST_BITS_COUNT, int BLOCKS_PER_FEATURE, bool USE_64_BIT_LOAD>
__forceinline__ __device__ void ComputeSplitPropertiesPass(const TCFeature* __restrict__ feature, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition, int fCount,
float* binSumsForPart,
float* smem) {
using THist = TPointHist<OUTER_HIST_BITS_COUNT, INNER_HIST_BITS_COUNT, BLOCK_SIZE>;
const int stripeSize = BLOCK_SIZE;
const int histBlockCount = 1;
if (USE_64_BIT_LOAD) {
#if __CUDA_ARCH__ < 300
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ < 700
const int OUTER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#else
const int OUTER_UNROLL = 1;//(INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) <= 2 ? 4 : 2;
#endif
const int size = partition->Size;
const int offset = partition->Offset;
#if __CUDA_ARCH__ >= 700
ComputeHistogram4 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
#else
ComputeHistogram2 < stripeSize, OUTER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist > (indices, offset, size,
target,
weight,
cindex,
smem);
#endif
} else {
#if __CUDA_ARCH__ < 300
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 4 : 2;
const int OUTER_UNROLL = 2;
#elif __CUDA_ARCH__ <= 350
const int INNER_UNROLL = (INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT) == 0 ? 8 : 4;
const int OUTER_UNROLL = 2;
#else
const int INNER_UNROLL = 1;
const int OUTER_UNROLL = 2;
#endif
ComputeHistogram<stripeSize, OUTER_UNROLL, INNER_UNROLL, histBlockCount, BLOCKS_PER_FEATURE, THist>(indices,
partition->Offset,
partition->Size,
target,
weight,
cindex,
smem);
}
__syncthreads();
const int maxFoldCount = (1 << (5 + INNER_HIST_BITS_COUNT + OUTER_HIST_BITS_COUNT));
const int fid = (threadIdx.x / 64);
const int w = threadIdx.x & 1;
const int featureFolds = fid < fCount ? feature[fid].Folds : 0;
const int featureOffset = fid * maxFoldCount * 2 + w;
for (int fold = (threadIdx.x / 2) & 31; fold < featureFolds; fold += 32) {
if (fid < fCount) {
const float val = smem[featureOffset + 2 * fold];
if (abs(val) > 1e-20f) {
if (BLOCKS_PER_FEATURE > 1) {
atomicAdd(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
} else {
WriteThrough(binSumsForPart + (feature[fid].FirstFoldIndex + fold) * 2 + w, val);
}
}
}
}
}
#define DECLARE_PASS(O, I, M, USE_64_BIT_LOAD) \
ComputeSplitPropertiesPass<BLOCK_SIZE, O, I, M, USE_64_BIT_LOAD>(feature, cindex, target, weight, indices, partition, fCount, binSums, &counters[0]);
template<int BLOCK_SIZE, bool FULL_PASS, int M>
#if __CUDA_ARCH__ == 600
__launch_bounds__(BLOCK_SIZE, 1)
#elif __CUDA_ARCH__ >= 520
__launch_bounds__(BLOCK_SIZE, 2)
#else
__launch_bounds__(BLOCK_SIZE, 1)
#endif
__global__ void ComputeSplitPropertiesNBImpl(
const TCFeature* __restrict__ feature, int fCount, const ui32* __restrict__ cindex,
const float* __restrict__ target, const float* __restrict__ weight,
const ui32* __restrict__ indices,
const TDataPartition* __restrict__ partition,
float* __restrict__ binSums,
const int totalFeatureCount) {
TPointwisePartOffsetsHelper helper(gridDim.z);
helper.ShiftPartAndBinSumsPtr(partition, binSums, totalFeatureCount, FULL_PASS);
feature += (blockIdx.x / M) * 4;
cindex += feature->Offset;
fCount = min(fCount - (blockIdx.x / M) * 4, 4);
__shared__ float counters[32 * BLOCK_SIZE];
const int maxBinCount = GetMaxBinCount(feature, fCount, (int*) &counters[0]);
__syncthreads();
//CatBoost always uses direct loads on the first pass of histogram calculation; for this step 64-bit loads are almost 2x faster
#if __CUDA_ARCH__ > 350
const bool use64BitLoad = FULL_PASS;// float2 for target/indices/weights
#else
const bool use64BitLoad = false;
#endif
if (partition->Size) {
if (maxBinCount <= 32) {
DECLARE_PASS(0, 0, M, use64BitLoad);
} else if (maxBinCount <= 64) {
DECLARE_PASS(0, 1, M, use64BitLoad);
} else if (maxBinCount <= 128) {
DECLARE_PASS(0, 2, M, use64BitLoad);
} else {
DECLARE_PASS(2, 1, M, use64BitLoad);
}
}
}
template<int BLOCK_SIZE,
int BLOCKS_PER_FEATURE_COUNT>
inline void RunComputeHist2NonBinaryKernel(const TCFeature* nbFeatures, int nbCount,
const ui32* cindex,
const float* target, const float* weight, const ui32* indices,
const TDataPartition* partition,
float* binSums, const int binFeatureCount,
bool fullPass,
TCudaStream stream,
dim3 numBlocks)
{
if (fullPass) {
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, true, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
} else {
ComputeSplitPropertiesNBImpl < BLOCK_SIZE, false, BLOCKS_PER_FEATURE_COUNT > << <numBlocks, BLOCK_SIZE, 0, stream>>>(
nbFeatures, nbCount, cindex, target, weight,
indices, partition, binSums, binFeatureCount
);
}
}
void ComputeHist2NonBinary(const TCFeature* nbFeatures, ui32 nbCount,
const ui32* cindex,
const float* target, const float* weight,
const ui32* indices, ui32 size,
const TDataPartition* partition, ui32 partCount, ui32 foldCount,
bool fullPass,
ui32 histLineSize,
float* binSums,
TCudaStream stream) {
if (nbCount) {
dim3 numBlocks;
numBlocks.x = (nbCount + 3) / 4;
const int histPartCount = (fullPass ? partCount : partCount / 2);
numBlocks.y = histPartCount;
numBlocks.z = foldCount;
const int blockSize = 384;
const ui32 multiplier = min(EstimateBlockPerFeatureMultiplier(numBlocks, size), 64);
numBlocks.x *= multiplier;
#define COMPUTE(k)\
RunComputeHist2NonBinaryKernel<blockSize, k>(nbFeatures, nbCount, cindex, target, weight, indices, \
partition, binSums, histLineSize, fullPass, stream, numBlocks);
if (multiplier == 1) {
COMPUTE(1)
} else if (multiplier == 2) {
COMPUTE(2)
} else if (multiplier == 4) {
COMPUTE(4)
} else if (multiplier == 8) {
COMPUTE(8)
} else if (multiplier == 16) {
COMPUTE(16)
} else if (multiplier == 32) {
COMPUTE(32)
} else if (multiplier == 64) {
COMPUTE(64)
} else {
exit(1);
}
#undef COMPUTE
}
}
}
|
542bdeb0eab06bd69e4894f5fa803e1bc7e5b6e5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#define FILTER_WIDTH 9
__constant__ float dc_filter[FILTER_WIDTH * FILTER_WIDTH];
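// dc_filter lives in constant memory and is only read by blurImgKernel3; the host side
// fills it with hipMemcpyToSymbol(dc_filter, filter, filterSize) in the constant-memory
// branch of blurImg below before launching that kernel.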
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
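// Typical usage, mirroring blurImg below:
//   GpuTimer timer;
//   timer.Start();
//   /* launch the kernel to be timed */
//   timer.Stop();
//   float ms = timer.Elapsed(); // waits on the stop event, returns milliseconds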
void readPnm(char *fileName, int &width, int &height, uchar3 *&pixels)
{
FILE *f = fopen(fileName, "r");
if (f == NULL)
{
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int max_val;
fscanf(f, "%i", &max_val);
if (max_val > 255) // In this exercise, we assume 1 byte per value
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i < width * height; i++)
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
fclose(f);
}
void writePnm(uchar3 *pixels, int width, int height, char *fileName)
{
FILE *f = fopen(fileName, "w");
if (f == NULL)
{
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++)
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
fclose(f);
}
__global__ void blurImgKernel1(uchar3 *inPixels, int width, int height,
float *filter, int filterWidth,
uchar3 *outPixels)
{
// TODO
int idxR = blockIdx.y * blockDim.y + threadIdx.y;
int idxC = blockIdx.x * blockDim.x + threadIdx.x;
int idx = idxR * width + idxC;
int filterPadding = filterWidth / 2;
if (idxR < height && idxC < width)
{
float3 outPixel = make_float3(0, 0, 0);
for (int fR = 0; fR < filterWidth; fR++)
{
for (int fC = 0; fC < filterWidth; fC++)
{
float filterVal = filter[fR * filterWidth + fC];
int inR = (idxR - filterPadding) + fR;
int inC = (idxC - filterPadding) + fC;
inR = min(height - 1, max(0, inR));
inC = min(width - 1, max(0, inC));
uchar3 inPixel = inPixels[inR * width + inC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[idx] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
__global__ void blurImgKernel2(uchar3 *inPixels, int width, int height,
float *filter, int filterWidth,
uchar3 *outPixels)
{
// TODO
extern __shared__ uchar3 s_inPixels[];
int idxC = threadIdx.x + blockIdx.x * blockDim.x;
int idxR = threadIdx.y + blockIdx.y * blockDim.y;
int filterPadding = filterWidth / 2;
int shareBlockWidth = blockDim.x + filterWidth;
int inRow = idxR - filterPadding;
int inCol = idxC - filterPadding;
inRow = max(0, inRow);
inCol = max(0, inCol);
// SMEM copying : [-filterPadding, -filterPadding] -> [0, 0]
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
// SMEM copying : [blockDim.x - filterPadding, -filterPadding] -> [0 + blockDim.x, 0]
if (floorf(threadIdx.x / filterWidth) == 0){
inRow = idxR - filterPadding;
inRow = max(0, inRow);
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x + blockDim.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [-filterPadding, blockDim.y - filterPadding] -> [0, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = idxC - filterPadding;
inCol = max(0, inCol);
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [blockDim.x - filterPadding, blockDim.y - filterPadding] -> [0 + blockDim.x, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0 && floorf(threadIdx.x / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + blockDim.x + threadIdx.x] =
inPixels[inRow * width + inCol];
}
__syncthreads();
// Convolution : [0, 0] -> [+filterPaddimg, +filterPadding]
if (idxC < width && idxR < height)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inPixelR = threadIdx.y + filterR;
int inPixelC = threadIdx.x + filterC;
uchar3 inPixel = s_inPixels[inPixelR * shareBlockWidth + inPixelC];
outPixel.x += (filterVal * inPixel.x);
outPixel.y += (filterVal * inPixel.y);
outPixel.z += (filterVal * inPixel.z);
}
}
outPixels[idxR * width + idxC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
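// blurImgKernel2 expects its dynamic shared memory to hold the input tile plus a halo
// of filterWidth extra pixels per dimension, i.e. at least
// (blockDim.x + filterWidth) * (blockDim.y + filterWidth) * sizeof(uchar3) bytes
// (shareBlockWidth above is blockDim.x + filterWidth).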
__global__ void blurImgKernel3(uchar3 *inPixels, int width, int height,
int filterWidth,
uchar3 *outPixels)
{
// TODO
extern __shared__ uchar3 s_inPixels[];
int idxC = threadIdx.x + blockIdx.x * blockDim.x;
int idxR = threadIdx.y + blockIdx.y * blockDim.y;
int filterPadding = filterWidth / 2;
int shareBlockWidth = blockDim.x + filterWidth;
int inRow = idxR - filterPadding;
int inCol = idxC - filterPadding;
inRow = max(0, inRow);
inCol = max(0, inCol);
// SMEM copying : [-filterPadding, -filterPadding] -> [0, 0]
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
// SMEM copying : [blockDim.x - filterPadding, -filterPadding] -> [0 + blockDim.x, 0]
if (floorf(threadIdx.x / filterWidth) == 0){
inRow = idxR - filterPadding;
inRow = max(0, inRow);
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x + blockDim.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [-filterPadding, blockDim.y - filterPadding] -> [0, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = idxC - filterPadding;
inCol = max(0, inCol);
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [blockDim.x - filterPadding, blockDim.y - filterPadding] -> [0 + blockDim.x, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0 && floorf(threadIdx.x / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + blockDim.x + threadIdx.x] = inPixels[inRow * width + inCol];
}
__syncthreads();
// Convolution : [0, 0] -> [+filterPadding, +filterPadding]
if (idxC < width && idxR < height)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = dc_filter[filterR * filterWidth + filterC];
int inPixelR = threadIdx.y + filterR;
int inPixelC = threadIdx.x + filterC;
uchar3 inPixel = s_inPixels[inPixelR * shareBlockWidth + inPixelC];
outPixel.x += (filterVal * inPixel.x);
outPixel.y += (filterVal * inPixel.y);
outPixel.z += (filterVal * inPixel.z);
}
}
outPixels[idxR * width + idxC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
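// blurImgKernel3 differs from blurImgKernel2 only in where the filter lives:
// instead of a float* in global memory it reads dc_filter from __constant__
// memory, which is cached and broadcast when all threads of a warp read the
// same coefficient, as they do in the convolution loop above.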
void blurImg(uchar3 *inPixels, int width, int height, float *filter, int filterWidth,
uchar3 *outPixels,
bool useDevice = false, dim3 blockSize = dim3(1, 1), int kernelType = 1)
{
if (useDevice == false)
{
for (int outPixelsR = 0; outPixelsR < height; outPixelsR++)
{
for (int outPixelsC = 0; outPixelsC < width; outPixelsC++)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inR = outPixelsR - filterWidth / 2 + filterR;
int inC = outPixelsC - filterWidth / 2 + filterC;
inR = min(max(0, inR), height - 1);
inC = min(max(0, inC), width - 1);
uchar3 inPixel = inPixels[inR * width + inC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[outPixelsR * width + outPixelsC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
}
else // Use device
{
GpuTimer timer;
printf("\nKernel %i, ", kernelType);
// Allocate device memories
uchar3 *d_inPixels, *d_outPixels;
float *d_filter;
size_t pixelsSize = width * height * sizeof(uchar3);
size_t filterSize = filterWidth * filterWidth * sizeof(float);
CHECK(hipMalloc(&d_inPixels, pixelsSize));
CHECK(hipMalloc(&d_outPixels, pixelsSize));
if (kernelType == 1 || kernelType == 2)
{
CHECK(hipMalloc(&d_filter, filterSize));
}
// Copy data to device memories
CHECK(hipMemcpy(d_inPixels, inPixels, pixelsSize, hipMemcpyHostToDevice));
if (kernelType == 1 || kernelType == 2)
{
CHECK(hipMemcpy(d_filter, filter, filterSize, hipMemcpyHostToDevice));
}
else
{
// TODO: copy data from "filter" (on host) to "dc_filter" (on CMEM of device)
CHECK(hipMemcpyToSymbol(dc_filter, filter, filterSize));
}
// Call kernel
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
printf("block size %idxC%i, grid size %idxC%i\n", blockSize.x, blockSize.y, gridSize.x, gridSize.y);
timer.Start();
if (kernelType == 1)
{
// TODO: call blurImgKernel1
hipLaunchKernelGGL(( blurImgKernel1), dim3(gridSize), dim3(blockSize), 0, 0, d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
}
else if (kernelType == 2)
{
// TODO: call blurImgKernel2
size_t smem = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(uchar3); // matches the (blockDim + filterWidth) tile indexed by the kernel
hipLaunchKernelGGL(( blurImgKernel2), dim3(gridSize), dim3(blockSize), smem, 0, d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
}
else
{
// TODO: call blurImgKernel3
size_t smem = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(uchar3); // matches the (blockDim + filterWidth) tile indexed by the kernel
hipLaunchKernelGGL(( blurImgKernel3), dim3(gridSize), dim3(blockSize), smem, 0, d_inPixels, width, height, filterWidth, d_outPixels);
}
timer.Stop();
float time = timer.Elapsed();
printf("Kernel time: %f ms\n", time);
hipDeviceSynchronize();
CHECK(hipGetLastError());
// Copy result from device memory
CHECK(hipMemcpy(outPixels, d_outPixels, pixelsSize, hipMemcpyDeviceToHost));
// Free device memories
CHECK(hipFree(d_inPixels));
CHECK(hipFree(d_outPixels));
if (kernelType == 1 || kernelType == 2)
{
CHECK(hipFree(d_filter));
}
}
}
float computeError(uchar3 *a1, uchar3 *a2, int n)
{
float err = 0;
for (int i = 0; i < n; i++)
{
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError(uchar3 *deviceResult, uchar3 *hostResult, int width, int height)
{
float err = computeError(deviceResult, hostResult, width * height);
printf("Error: %f\n", err);
}
char *concatStr(const char *s1, const char *s2)
{
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
void printDeviceInfo()
{
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
int main(int argc, char **argv)
{
if (argc != 3 && argc != 5)
{
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
printDeviceInfo();
// Read input image file
int width, height;
uchar3 *inPixels;
readPnm(argv[1], width, height, inPixels);
printf("\nImage size (width x height): %i x %i\n", width, height);
// Set up a simple filter with blurring effect
int filterWidth = FILTER_WIDTH;
float *filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
}
}
// Blur input image not using device
uchar3 *correctOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, correctOutPixels);
// Blur input image using device, kernel 1
dim3 blockSize(32, 32); // Default
if (argc == 5)
{
blockSize.x = atoi(argv[3]);
blockSize.y = atoi(argv[4]);
}
uchar3 *outPixels1 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels1, true, blockSize, 1);
printError(outPixels1, correctOutPixels, width, height);
// Blur input image using device, kernel 2
uchar3 *outPixels2 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels2, true, blockSize, 2);
printError(outPixels2, correctOutPixels, width, height);
// Blur input image using device, kernel 3
uchar3 *outPixels3 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels3, true, blockSize, 3);
printError(outPixels3, correctOutPixels, width, height);
// Write results to files
char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
writePnm(correctOutPixels, width, height, concatStr(outFileNameBase, "_host.pnm"));
writePnm(outPixels1, width, height, concatStr(outFileNameBase, "_device1.pnm"));
writePnm(outPixels2, width, height, concatStr(outFileNameBase, "_device2.pnm"));
writePnm(outPixels3, width, height, concatStr(outFileNameBase, "_device3.pnm"));
// Free memories
free(inPixels);
free(filter);
free(correctOutPixels);
free(outPixels1);
free(outPixels2);
free(outPixels3);
}
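/* Build/run sketch (the file and image names below are placeholders, not taken
   from the original project):
       hipcc blur.hip -o blur
       ./blur in.pnm out.pnm 16 16
   argv[1] is the input P3 image, argv[2] the output base name, and the optional
   argv[3]/argv[4] override the default 32x32 block size. */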
|
542bdeb0eab06bd69e4894f5fa803e1bc7e5b6e5.cu
|
%%cu
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h> // malloc/free/atoi used below
#include <string.h> // strcmp/strcpy/strcat/strlen/strtok used below
#define FILTER_WIDTH 9
__constant__ float dc_filter[FILTER_WIDTH * FILTER_WIDTH];
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
void readPnm(char *fileName, int &width, int &height, uchar3 *&pixels)
{
FILE *f = fopen(fileName, "r");
if (f == NULL)
{
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int max_val;
fscanf(f, "%i", &max_val);
if (max_val > 255) // In this exercise, we assume 1 byte per value
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i < width * height; i++)
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
fclose(f);
}
void writePnm(uchar3 *pixels, int width, int height, char *fileName)
{
FILE *f = fopen(fileName, "w");
if (f == NULL)
{
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++)
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
fclose(f);
}
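// readPnm/writePnm above handle plain-text PPM ("P3") data only. As a small
// illustration, a 2x1 image in that format looks like:
//   P3
//   2 1
//   255
//   255 0 0   0 255 0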
__global__ void blurImgKernel1(uchar3 *inPixels, int width, int height,
float *filter, int filterWidth,
uchar3 *outPixels)
{
// TODO
int idxR = blockIdx.y * blockDim.y + threadIdx.y;
int idxC = blockIdx.x * blockDim.x + threadIdx.x;
int idx = idxR * width + idxC;
int filterPadding = filterWidth / 2;
if (idxR < height && idxC < width)
{
float3 outPixel = make_float3(0, 0, 0);
for (int fR = 0; fR < filterWidth; fR++)
{
for (int fC = 0; fC < filterWidth; fC++)
{
float filterVal = filter[fR * filterWidth + fC];
int inR = (idxR - filterPadding) + fR;
int inC = (idxC - filterPadding) + fC;
inR = min(height - 1, max(0, inR));
inC = min(width - 1, max(0, inC));
uchar3 inPixel = inPixels[inR * width + inC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[idx] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
__global__ void blurImgKernel2(uchar3 *inPixels, int width, int height,
float *filter, int filterWidth,
uchar3 *outPixels)
{
// TODO
extern __shared__ uchar3 s_inPixels[];
int idxC = threadIdx.x + blockIdx.x * blockDim.x;
int idxR = threadIdx.y + blockIdx.y * blockDim.y;
int filterPadding = filterWidth / 2;
int shareBlockWidth = blockDim.x + filterWidth;
int inRow = idxR - filterPadding;
int inCol = idxC - filterPadding;
inRow = max(0, inRow);
inCol = max(0, inCol);
// SMEM copying : [-filterPadding, -filterPadding] -> [0, 0]
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
// SMEM copying : [blockDim.x - filterPadding, -filterPadding] -> [0 + blockDim.x, 0]
if (floorf(threadIdx.x / filterWidth) == 0){
inRow = idxR - filterPadding;
inRow = max(0, inRow);
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x + blockDim.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [-filterPadding, blockDim.y - filterPadding] -> [0, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = idxC - filterPadding;
inCol = max(0, inCol);
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [blockDim.x - filterPadding, blockDim.y - filterPadding] -> [0 + blockDim.x, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0 && floorf(threadIdx.x / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + blockDim.x + threadIdx.x] =
inPixels[inRow * width + inCol];
}
__syncthreads();
// Convolution : [0, 0] -> [+filterPadding, +filterPadding]
if (idxC < width && idxR < height)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inPixelR = threadIdx.y + filterR;
int inPixelC = threadIdx.x + filterC;
uchar3 inPixel = s_inPixels[inPixelR * shareBlockWidth + inPixelC];
outPixel.x += (filterVal * inPixel.x);
outPixel.y += (filterVal * inPixel.y);
outPixel.z += (filterVal * inPixel.z);
}
}
outPixels[idxR * width + idxC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
__global__ void blurImgKernel3(uchar3 *inPixels, int width, int height,
int filterWidth,
uchar3 *outPixels)
{
// TODO
extern __shared__ uchar3 s_inPixels[];
int idxC = threadIdx.x + blockIdx.x * blockDim.x;
int idxR = threadIdx.y + blockIdx.y * blockDim.y;
int filterPadding = filterWidth / 2;
int shareBlockWidth = blockDim.x + filterWidth;
int inRow = idxR - filterPadding;
int inCol = idxC - filterPadding;
inRow = max(0, inRow);
inCol = max(0, inCol);
// SMEM copying : [-filterPadding, -filterPadding] -> [0, 0]
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
// SMEM copying : [blockDim.x - filterPadding, -filterPadding] -> [0 + blockDim.x, 0]
if (floorf(threadIdx.x / filterWidth) == 0){
inRow = idxR - filterPadding;
inRow = max(0, inRow);
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[threadIdx.y * shareBlockWidth + threadIdx.x + blockDim.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [-filterPadding, blockDim.y - filterPadding] -> [0, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = idxC - filterPadding;
inCol = max(0, inCol);
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + threadIdx.x] = inPixels[inRow * width + inCol];
}
// SMEM copying : [blockDim.x - filterPadding, blockDim.y - filterPadding] -> [0 + blockDim.x, 0 + blockDim.y]
if (floorf(threadIdx.y / filterWidth) == 0 && floorf(threadIdx.x / filterWidth) == 0){
inRow = (idxR - filterPadding) + blockDim.y;
inRow = min(height - 1, max(0, inRow));
inCol = (idxC - filterPadding) + blockDim.x;
inCol = min(width - 1, max(0, inCol));
s_inPixels[(threadIdx.y + blockDim.y) * shareBlockWidth + blockDim.x + threadIdx.x] = inPixels[inRow * width + inCol];
}
__syncthreads();
// Convolution : [0, 0] -> [+filterPadding, +filterPadding]
if (idxC < width && idxR < height)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = dc_filter[filterR * filterWidth + filterC];
int inPixelR = threadIdx.y + filterR;
int inPixelC = threadIdx.x + filterC;
uchar3 inPixel = s_inPixels[inPixelR * shareBlockWidth + inPixelC];
outPixel.x += (filterVal * inPixel.x);
outPixel.y += (filterVal * inPixel.y);
outPixel.z += (filterVal * inPixel.z);
}
}
outPixels[idxR * width + idxC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
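// The three kernels trade memory traffic for setup cost: blurImgKernel1 reads
// every input pixel straight from global memory, blurImgKernel2 tiles the input
// into shared memory, and blurImgKernel3 additionally moves the filter into
// __constant__ memory (dc_filter), leaving only the pixel tile in SMEM.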
void blurImg(uchar3 *inPixels, int width, int height, float *filter, int filterWidth,
uchar3 *outPixels,
bool useDevice = false, dim3 blockSize = dim3(1, 1), int kernelType = 1)
{
if (useDevice == false)
{
for (int outPixelsR = 0; outPixelsR < height; outPixelsR++)
{
for (int outPixelsC = 0; outPixelsC < width; outPixelsC++)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inR = outPixelsR - filterWidth / 2 + filterR;
int inC = outPixelsC - filterWidth / 2 + filterC;
inR = min(max(0, inR), height - 1);
inC = min(max(0, inC), width - 1);
uchar3 inPixel = inPixels[inR * width + inC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[outPixelsR * width + outPixelsC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
}
else // Use device
{
GpuTimer timer;
printf("\nKernel %i, ", kernelType);
// Allocate device memories
uchar3 *d_inPixels, *d_outPixels;
float *d_filter;
size_t pixelsSize = width * height * sizeof(uchar3);
size_t filterSize = filterWidth * filterWidth * sizeof(float);
CHECK(cudaMalloc(&d_inPixels, pixelsSize));
CHECK(cudaMalloc(&d_outPixels, pixelsSize));
if (kernelType == 1 || kernelType == 2)
{
CHECK(cudaMalloc(&d_filter, filterSize));
}
// Copy data to device memories
CHECK(cudaMemcpy(d_inPixels, inPixels, pixelsSize, cudaMemcpyHostToDevice));
if (kernelType == 1 || kernelType == 2)
{
CHECK(cudaMemcpy(d_filter, filter, filterSize, cudaMemcpyHostToDevice));
}
else
{
// TODO: copy data from "filter" (on host) to "dc_filter" (on CMEM of device)
CHECK(cudaMemcpyToSymbol(dc_filter, filter, filterSize));
}
// Call kernel
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
printf("block size %idxC%i, grid size %idxC%i\n", blockSize.x, blockSize.y, gridSize.x, gridSize.y);
timer.Start();
if (kernelType == 1)
{
// TODO: call blurImgKernel1
blurImgKernel1<<<gridSize, blockSize>>>(d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
}
else if (kernelType == 2)
{
// TODO: call blurImgKernel2
size_t smem = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(uchar3); // matches the (blockDim + filterWidth) tile indexed by the kernel
blurImgKernel2<<<gridSize, blockSize, smem>>>(d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
}
else
{
// TODO: call blurImgKernel3
size_t smem = (blockSize.x + filterWidth) * (blockSize.y + filterWidth) * sizeof(uchar3); // matches the (blockDim + filterWidth) tile indexed by the kernel
blurImgKernel3<<<gridSize, blockSize, smem>>>(d_inPixels, width, height, filterWidth, d_outPixels);
}
timer.Stop();
float time = timer.Elapsed();
printf("Kernel time: %f ms\n", time);
cudaDeviceSynchronize();
CHECK(cudaGetLastError());
// Copy result from device memory
CHECK(cudaMemcpy(outPixels, d_outPixels, pixelsSize, cudaMemcpyDeviceToHost));
// Free device memories
CHECK(cudaFree(d_inPixels));
CHECK(cudaFree(d_outPixels));
if (kernelType == 1 || kernelType == 2)
{
CHECK(cudaFree(d_filter));
}
}
}
float computeError(uchar3 *a1, uchar3 *a2, int n)
{
float err = 0;
for (int i = 0; i < n; i++)
{
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError(uchar3 *deviceResult, uchar3 *hostResult, int width, int height)
{
float err = computeError(deviceResult, hostResult, width * height);
printf("Error: %f\n", err);
}
char *concatStr(const char *s1, const char *s2)
{
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
void printDeviceInfo()
{
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
int main(int argc, char **argv)
{
if (argc != 3 && argc != 5)
{
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
printDeviceInfo();
// Read input image file
int width, height;
uchar3 *inPixels;
readPnm(argv[1], width, height, inPixels);
printf("\nImage size (width x height): %i x %i\n", width, height);
// Set up a simple filter with blurring effect
int filterWidth = FILTER_WIDTH;
float *filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
}
}
// Blur input image not using device
uchar3 *correctOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, correctOutPixels);
// Blur input image using device, kernel 1
dim3 blockSize(32, 32); // Default
if (argc == 5)
{
blockSize.x = atoi(argv[3]);
blockSize.y = atoi(argv[4]);
}
uchar3 *outPixels1 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels1, true, blockSize, 1);
printError(outPixels1, correctOutPixels, width, height);
// Blur input image using device, kernel 2
uchar3 *outPixels2 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels2, true, blockSize, 2);
printError(outPixels2, correctOutPixels, width, height);
// Blur input image using device, kernel 3
uchar3 *outPixels3 = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, outPixels3, true, blockSize, 3);
printError(outPixels3, correctOutPixels, width, height);
// Write results to files
char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
writePnm(correctOutPixels, width, height, concatStr(outFileNameBase, "_host.pnm"));
writePnm(outPixels1, width, height, concatStr(outFileNameBase, "_device1.pnm"));
writePnm(outPixels2, width, height, concatStr(outFileNameBase, "_device2.pnm"));
writePnm(outPixels3, width, height, concatStr(outFileNameBase, "_device3.pnm"));
// Free memories
free(inPixels);
free(filter);
free(correctOutPixels);
free(outPixels1);
free(outPixels2);
free(outPixels3);
}
|
a35dbfe1c16c4df0bd3313216d4b353c36322e0b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "TSPCuda.h"
#include "globalData.h"
#define CHECK_GPU(msg) check_gpu__ (__FILE__, __LINE__, (msg))
static void check_gpu__ (const char * file, size_t line, const char * msg);
static void check_gpu__ (const char* file, size_t line, const char* msg)
{
hipError_t err = hipGetLastError ();
if (err != hipSuccess) {
fprintf (stderr, "*** [%s:%lu] %s -- CUDA Error (%d): %s ***\n",
file, line, msg, (int)err, hipGetErrorString (err));
exit (-1);
}
}
void CheckValidity(int *tour, char *text)
{
int visited[NUM_CITIES + 1];
int i;
for(i = 0; i <= NUM_CITIES; i++)
visited[i] = 0;
for(i = 0; i < NUM_CITIES; i++)
{
if(visited[tour[i]] == 1)
{
printf("ERROR:Invalid path generated:<%s>,city %d repeated\n" ,text, tour[i] );
//printTour(tour);
//showBackTrace();
exit(0);
}
visited[tour[i]] = 1;
}
for(i = 1; i <= NUM_CITIES; i++)
{
if(visited[i] == 0)
{
printf("ERROR:Invalid path generated:<%s>, city %d not present\n", text, i);
//printTour(tour);
//showBackTrace();
exit(0);
}
}
}
__global__
void TSPSwapKernel(unsigned int n, int* completeTour, int* coords, unsigned int loops, int numBlocks)
{
__shared__ float fitnessMatrix[NUM_THREADS][NUM_THREADS];
int i,j, prevX, prevY, city , Min = 0 , counter = 0;
__shared__ int swapCities[2];
__shared__ int globalTourThreads[NUM_THREADS];
int localTour[NUM_THREADS];
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int bid = blockIdx.x;
int offset = bid * NUM_THREADS;
int subTourLength = 32;
int temp;
float distance = 0;
float distanceBackup = 0;
if(bid == numBlocks - 1) // last block
{
if(NUM_CITIES % 32 == 0)
subTourLength = NUM_THREADS;
else
subTourLength = NUM_CITIES % NUM_THREADS;
}
else
subTourLength = NUM_THREADS;
for(i = 0; i < subTourLength; i++) {
localTour[i] = completeTour[offset + i];
globalTourThreads[i] = localTour[i];
}
prevX = (coords + (2 * (localTour[0] - 1)))[0];
prevY = (coords + (2 * (localTour[0] - 1)))[1];
for(i = 1; i < subTourLength; i++)
{
city = localTour[i] - 1;
distanceBackup += (float)(((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
+ (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
prevX = (coords + (city * 2))[0];
prevY = (coords + (city * 2))[1];
}
fitnessMatrix[tidx][tidy] = (float)INT_MAX;
//MAX_ITERATIONS = loops ;
while ( counter != loops )
{
//improvement = 0;
swapCities[0] = 0;
swapCities[1] = 0;
if(tidx < tidy && tidx != 0 && tidy < subTourLength - 1)
{
/*
* what is in globalTourThreads for the
* first iteration ?
*/
for(i = 0; i < subTourLength; i++) {
localTour[i] = globalTourThreads[i];
}
//swap cities tidx and tidy
temp = localTour[tidx];
localTour[tidx] = localTour[tidy];
localTour[tidy] = temp;
prevX = (coords + (2 * (localTour[0] - 1)))[0];
prevY = (coords + (2 * (localTour[0] - 1)))[1];
distance = 0;
for(i = 1; i < subTourLength; i++)
{
int city = localTour[i] - 1;
distance += (((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
+ (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
prevX = (coords + (city * 2))[0];
prevY = (coords + (city * 2))[1];
}
//fitnessMatrix[tidx][tidy] = distance;
//printf("%d(%d, %d) %f %f \n", bid, tidx, tidy, distance, distanceBackup);
if(distance < distanceBackup) //if new distance is lower than the old , reject.
{
fitnessMatrix[tidx][tidy] = distance;
//distanceBackup = distance;
}
}
__syncthreads();
Min = INT_MAX;
//int currentMin = 0;
if(threadIdx.x == 0 && threadIdx.y == 0) {
for( i = 1 ; i < subTourLength -1 ; i++ ) {
for ( j = i ; j < subTourLength -1 ; j++) {
//improvement += fitnessMatrix[i][j];
if (fitnessMatrix[i][j] < Min )
{
Min = fitnessMatrix[i][j];
swapCities[0] = i;
swapCities[1] = j;
}
}
}
temp = globalTourThreads[swapCities[0]];
globalTourThreads[swapCities[0]] = globalTourThreads[swapCities[1]];
globalTourThreads[swapCities[1]] = temp;
if((swapCities[0] == 0 && swapCities[1] == 0) || counter == MAX_ITERATIONS - 1) {
for(i = 0 ; i < subTourLength; i++) {
completeTour[offset + i] = globalTourThreads[i];
}
//printf("In termination loop for block:%d, counter:%d\n", bid, counter);
break;
}
}
__syncthreads();
distanceBackup = fitnessMatrix[swapCities[0]][swapCities[1]];
fitnessMatrix[swapCities[0]][swapCities[1]] = INT_MAX;
/*
* find the max value from fitness and swap them
* in the localtour and update localtour backup
* for all threads.
*/
counter++;
}
}
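// TSPSwapKernel, in outline: each block owns a sub-tour of up to NUM_THREADS
// cities; every thread pair (tidx, tidy) with 0 < tidx < tidy < subTourLength - 1
// evaluates the sub-tour obtained by swapping the cities at those two positions
// and records its cost in fitnessMatrix when it improves on the unswapped
// sub-tour. Thread (0,0) then picks the cheapest candidate, applies that swap to
// globalTourThreads, and the loop repeats until no swap helps or the iteration
// limit is hit. The cost being compared is a sum of squared segment lengths (no
// sqrtf is taken), i.e. a proxy for, not equal to, the Euclidean sub-tour length.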
// __global__
// void TSPSwapKernel1 (unsigned int n, int* completeTour, int* coords, unsigned int loops)
// {
// int currentNumCities = 0;
// currentNumCities = NUM_CITIES / 4;
// __shared__ float fitnessMatrix[(NUM_CITIES/4) + 1][(NUM_CITIES/4) + 1];
// __shared__ int globalTourThreads[(NUM_CITIES/4) + 1];
// __shared__ int swapCities[2] ;
// int localTour[(NUM_CITIES/4)+1];
// int i,j, prevX, prevY, city , Min = 0 , counter = 0;
// int tidx = threadIdx.x;
// int tidy = threadIdx.y;
// int bid = blockIdx.x;
// float distance = 0;
// float distanceBackup = 0;
// float improvement = 0;
// int offset = 0;
// int rem = NUM_CITIES % 4;
// int temp;
// offset = (NUM_CITIES/4) * bid;
// if(bid < rem)
// {
// currentNumCities++;
// offset += bid;
// }
// if(bid >= rem)
// offset += rem;
// for(i = 0; i < currentNumCities; i++) {
// localTour[i] = completeTour[offset + i];
// globalTourThreads[i] = localTour[i];
// }
// prevX = (coords + (2 * localTour[0]))[0];
// prevY = (coords + (2 * localTour[0]))[1];
// for(i = 1; i < currentNumCities; i++)
// {
// city = localTour[i];
// distanceBackup += (float)(((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
// + (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
// prevX = (coords + (city * 2))[0];
// prevY = (coords + (city * 2))[1];
// }
// fitnessMatrix[tidx][tidy] = (float)INT_MAX;
// while ( counter != MAX_ITERATIONS )
// {
// improvement = 0;
// swapCities[0] = 0;
// swapCities[1] = 0;
// if(tidx < tidy && tidx != 0 && tidy != currentNumCities)
// {
// /*
// * what is in globalTourThreads for the
// * first iteration ?
// */
// for(i = 0; i < currentNumCities; i++) {
// localTour[i] = globalTourThreads[i];
// }
// //swap cities tidx and tidy
// temp = localTour[tidx];
// localTour[tidx] = localTour[tidy];
// localTour[tidy] = temp;
// prevX = (coords + (2 * localTour[0]))[0];
// prevY = (coords + (2 * localTour[0]))[1];
// distance = 0;
// for(i = 1; i < currentNumCities; i++)
// {
// int city = localTour[i];
// distance += (((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
// + (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
// prevX = (coords + (city * 2))[0];
// prevY = (coords + (city * 2))[1];
// }
// //fitnessMatrix[tidx][tidy] = distance;
// //printf("%d(%d, %d) %f %f \n", bid, tidx, tidy, distance, distanceBackup);
// if(distance < distanceBackup) //if new distance is lower than the old , reject.
// {
// fitnessMatrix[tidx][tidy] = distance;
// //distanceBackup = distance;
// }
// }
// __syncthreads();
// Min = INT_MAX;
// //int currentMin = 0;
// if(threadIdx.x == 0 && threadIdx.y == 0) {
// for( i = 1 ; i < currentNumCities -1 ; i++ ) {
// for ( j = i ; j < currentNumCities -1 ; j++) {
// improvement += fitnessMatrix[i][j];
// if (fitnessMatrix[i][j] < Min )
// {
// Min = fitnessMatrix[i][j];
// swapCities[0] = i;
// swapCities[1] = j;
// }
// }
// }
// temp = globalTourThreads[swapCities[0]];
// globalTourThreads[swapCities[0]] = globalTourThreads[swapCities[1]];
// globalTourThreads[swapCities[1]] = temp;
// if((swapCities[0] == 0 && swapCities[1] == 0) || counter == MAX_ITERATIONS - 1) {
// for(i = 0 ; i < currentNumCities; i++) {
// completeTour[offset + i] = globalTourThreads[i];
// }
// //printf("In termination loop for block:%d, counter:%d\n", bid, counter);
// break;
// }
// }
// __syncthreads();
// distanceBackup = fitnessMatrix[swapCities[0]][swapCities[1]];
// fitnessMatrix[swapCities[0]][swapCities[1]] = INT_MAX;
// /*
// * find the max value from fitness and swap them
// * in the localtour and update localtour backup
// * for all threads.
// */
// counter++;
// }
// /*
// * find the max value from fitness and swap only
// * those elements in completeTour
// */
// }
int *
createArrayOnGPU (unsigned int n)
{
int* tour_gpu = NULL;
if (n) {
hipMalloc (&tour_gpu, n * sizeof (int));
CHECK_GPU ("Out of memory?");
assert (tour_gpu);
}
return tour_gpu;
}
int* createDMatOnGPU(unsigned int n)
{
int* coords_dMat = NULL;
if(n)
{
hipMalloc(&coords_dMat,2 * n * sizeof(int));
CHECK_GPU("OUT OF MEMORY FOR DMAT");
assert(coords_dMat);
}
return coords_dMat;
}
void
freeKeysOnGPU (int* tour_gpu)
{
if (tour_gpu) hipFree (tour_gpu);
}
void
copyKeysToGPU (unsigned int n, int* Dest_gpu, const int* Src_cpu)
{
hipMemcpy (Dest_gpu, Src_cpu, n * sizeof (int),
hipMemcpyHostToDevice); CHECK_GPU ("Copying keys to GPU");
}
void copyDMatToGPU(unsigned int n, int * Dest_gpu, const int* Src_cpu)
{
hipMemcpy(Dest_gpu , Src_cpu, 2 * n * sizeof(int), hipMemcpyHostToDevice);
CHECK_GPU ("Copying coords to GPU");
}
void
copyKeysFromGPU (unsigned int n, int* Dest_cpu, int* Src_gpu)
{
hipMemcpy (Dest_cpu, Src_gpu, n * sizeof (int),hipMemcpyDeviceToHost);
CHECK_GPU ("Copying keys from GPU");
}
extern "C"
void TSPSwapRun(int* tour,const int* coords)
{
int n = NUM_CITIES;
int numBlocks = n/32;
if(n % 32 != 0)
numBlocks++;
int* tour_gpu = createArrayOnGPU(n);
int* coords_gpu = createDMatOnGPU(n);
int *newTour = (int *)malloc(sizeof(int) * n);
int *coordsArray = (int *)malloc(2 * n * sizeof(int));
for(int i = 0; i < n; i++)
{
coordsArray[2 * i] = coords[2 * i];
coordsArray[(2 * i) + 1] = coords[(2 * i) + 1];
}
// printf("CUDA PATH before KERNEL\n");
for(int i = 0 ; i < n; i++)
{
if(tour[i] == 0)
printf("PANIC: zero city passed %d\n", i);
}
copyKeysToGPU (n,tour_gpu, tour);
copyDMatToGPU(n, coords_gpu, coords);
dim3 threadsPerBlock(32, 32);
hipLaunchKernelGGL(( TSPSwapKernel), dim3(numBlocks) ,dim3(threadsPerBlock), 0, 0, n, tour_gpu, coords_gpu, localIter, numBlocks);
hipDeviceSynchronize();
copyKeysFromGPU(n, tour, tour_gpu);
// printf("CUDA PATH after KERNEL\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d-", tour[i]);
// printf("\n");
CheckValidity(tour, "After Cuda Kernel");
if(tour_gpu)
hipFree(tour_gpu);
if(coords_gpu)
hipFree(coords_gpu);
}
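/* Usage sketch, assuming NUM_CITIES, NUM_THREADS and localIter come from the
   project headers (TSPCuda.h / globalData.h), as the commented-out main() below
   suggests: prepare a tour holding a permutation of 1..NUM_CITIES plus an
   int coords[2 * NUM_CITIES] array of (x, y) pairs, then call
       TSPSwapRun(tour, coords);
   The routine copies both arrays to the GPU, launches TSPSwapKernel with one
   block per 32-city chunk and a 32x32 thread grid, copies the tour back and
   validates it with CheckValidity(). */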
/*int main()
{
int tour[NUM_CITIES];
int* coords = (int*)malloc(2 * NUM_CITIES * sizeof(int *));
printf("\nNumber of Cities %d " , NUM_CITIES);
for(int i = 0; i < NUM_CITIES; i++)
{
tour[i] = i + 1;
coords[(2 * i)] = i;
coords[(2 * i) + 1] = i;
}
tour[1] = 3;
tour[2] = 2;
tour[17] = 19;
tour[18] = 18;
// printf("Before Kernel:\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d ", tour[i]);
// printf("\n");
TSPSwapRun((int *)tour, (const int*)coords);
// printf("After Kernel:\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d ", tour[i]);
// printf("\n");
}*/
|
a35dbfe1c16c4df0bd3313216d4b353c36322e0b.cu
|
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "TSPCuda.h"
#include "globalData.h"
#define CHECK_GPU(msg) check_gpu__ (__FILE__, __LINE__, (msg))
static void check_gpu__ (const char * file, size_t line, const char * msg);
static void check_gpu__ (const char* file, size_t line, const char* msg)
{
cudaError_t err = cudaGetLastError ();
if (err != cudaSuccess) {
fprintf (stderr, "*** [%s:%lu] %s -- CUDA Error (%d): %s ***\n",
file, line, msg, (int)err, cudaGetErrorString (err));
exit (-1);
}
}
void CheckValidity(int *tour, char *text)
{
int visited[NUM_CITIES + 1];
int i;
for(i = 0; i <= NUM_CITIES; i++)
visited[i] = 0;
for(i = 0; i < NUM_CITIES; i++)
{
if(visited[tour[i]] == 1)
{
printf("ERROR:Invalid path generated:<%s>,city %d repeated\n" ,text, tour[i] );
//printTour(tour);
//showBackTrace();
exit(0);
}
visited[tour[i]] = 1;
}
for(i = 1; i <= NUM_CITIES; i++)
{
if(visited[i] == 0)
{
printf("ERROR:Invalid path generated:<%s>, city %d not present\n", text, i);
//printTour(tour);
//showBackTrace();
exit(0);
}
}
}
__global__
void TSPSwapKernel(unsigned int n, int* completeTour, int* coords, unsigned int loops, int numBlocks)
{
__shared__ float fitnessMatrix[NUM_THREADS][NUM_THREADS];
int i,j, prevX, prevY, city , Min = 0 , counter = 0;
__shared__ int swapCities[2];
__shared__ int globalTourThreads[NUM_THREADS];
int localTour[NUM_THREADS];
int tidx = threadIdx.x;
int tidy = threadIdx.y;
int bid = blockIdx.x;
int offset = bid * NUM_THREADS;
int subTourLength = 32;
int temp;
float distance = 0;
float distanceBackup = 0;
if(bid == numBlocks - 1) // last block
{
if(NUM_CITIES % 32 == 0)
subTourLength = NUM_THREADS;
else
subTourLength = NUM_CITIES % NUM_THREADS;
}
else
subTourLength = NUM_THREADS;
for(i = 0; i < subTourLength; i++) {
localTour[i] = completeTour[offset + i];
globalTourThreads[i] = localTour[i];
}
prevX = (coords + (2 * (localTour[0] - 1)))[0];
prevY = (coords + (2 * (localTour[0] - 1)))[1];
for(i = 1; i < subTourLength; i++)
{
city = localTour[i] - 1;
distanceBackup += (float)(((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
+ (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
prevX = (coords + (city * 2))[0];
prevY = (coords + (city * 2))[1];
}
fitnessMatrix[tidx][tidy] = (float)INT_MAX;
//MAX_ITERATIONS = loops ;
while ( counter != loops )
{
//improvement = 0;
swapCities[0] = 0;
swapCities[1] = 0;
if(tidx < tidy && tidx != 0 && tidy < subTourLength - 1)
{
/*
* what is in globalTourThreads for the
* first iteration ?
*/
for(i = 0; i < subTourLength; i++) {
localTour[i] = globalTourThreads[i];
}
//swap cities tidx and tidy
temp = localTour[tidx];
localTour[tidx] = localTour[tidy];
localTour[tidy] = temp;
prevX = (coords + (2 * (localTour[0] - 1)))[0];
prevY = (coords + (2 * (localTour[0] - 1)))[1];
distance = 0;
for(i = 1; i < subTourLength; i++)
{
int city = localTour[i] - 1;
distance += (((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
+ (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
prevX = (coords + (city * 2))[0];
prevY = (coords + (city * 2))[1];
}
//fitnessMatrix[tidx][tidy] = distance;
//printf("%d(%d, %d) %f %f \n", bid, tidx, tidy, distance, distanceBackup);
if(distance < distanceBackup) //if new distance is lower than the old , reject.
{
fitnessMatrix[tidx][tidy] = distance;
//distanceBackup = distance;
}
}
__syncthreads();
Min = INT_MAX;
//int currentMin = 0;
if(threadIdx.x == 0 && threadIdx.y == 0) {
for( i = 1 ; i < subTourLength -1 ; i++ ) {
for ( j = i ; j < subTourLength -1 ; j++) {
//improvement += fitnessMatrix[i][j];
if (fitnessMatrix[i][j] < Min )
{
Min = fitnessMatrix[i][j];
swapCities[0] = i;
swapCities[1] = j;
}
}
}
temp = globalTourThreads[swapCities[0]];
globalTourThreads[swapCities[0]] = globalTourThreads[swapCities[1]];
globalTourThreads[swapCities[1]] = temp;
if((swapCities[0] == 0 && swapCities[1] == 0) || counter == MAX_ITERATIONS - 1) {
for(i = 0 ; i < subTourLength; i++) {
completeTour[offset + i] = globalTourThreads[i];
}
//printf("In termination loop for block:%d, counter:%d\n", bid, counter);
break;
}
}
__syncthreads();
distanceBackup = fitnessMatrix[swapCities[0]][swapCities[1]];
fitnessMatrix[swapCities[0]][swapCities[1]] = INT_MAX;
/*
* find the max value from fitness and swap them
* in the localtour and update localtour backup
* for all threads.
*/
counter++;
}
}
// __global__
// void TSPSwapKernel1 (unsigned int n, int* completeTour, int* coords, unsigned int loops)
// {
// int currentNumCities = 0;
// currentNumCities = NUM_CITIES / 4;
// __shared__ float fitnessMatrix[(NUM_CITIES/4) + 1][(NUM_CITIES/4) + 1];
// __shared__ int globalTourThreads[(NUM_CITIES/4) + 1];
// __shared__ int swapCities[2] ;
// int localTour[(NUM_CITIES/4)+1];
// int i,j, prevX, prevY, city , Min = 0 , counter = 0;
// int tidx = threadIdx.x;
// int tidy = threadIdx.y;
// int bid = blockIdx.x;
// float distance = 0;
// float distanceBackup = 0;
// float improvement = 0;
// int offset = 0;
// int rem = NUM_CITIES % 4;
// int temp;
// offset = (NUM_CITIES/4) * bid;
// if(bid < rem)
// {
// currentNumCities++;
// offset += bid;
// }
// if(bid >= rem)
// offset += rem;
// for(i = 0; i < currentNumCities; i++) {
// localTour[i] = completeTour[offset + i];
// globalTourThreads[i] = localTour[i];
// }
// prevX = (coords + (2 * localTour[0]))[0];
// prevY = (coords + (2 * localTour[0]))[1];
// for(i = 1; i < currentNumCities; i++)
// {
// city = localTour[i];
// distanceBackup += (float)(((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
// + (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
// prevX = (coords + (city * 2))[0];
// prevY = (coords + (city * 2))[1];
// }
// fitnessMatrix[tidx][tidy] = (float)INT_MAX;
// while ( counter != MAX_ITERATIONS )
// {
// improvement = 0;
// swapCities[0] = 0;
// swapCities[1] = 0;
// if(tidx < tidy && tidx != 0 && tidy != currentNumCities)
// {
// /*
// * what is in globalTourThreads for the
// * first iteration ?
// */
// for(i = 0; i < currentNumCities; i++) {
// localTour[i] = globalTourThreads[i];
// }
// //swap cities tidx and tidy
// temp = localTour[tidx];
// localTour[tidx] = localTour[tidy];
// localTour[tidy] = temp;
// prevX = (coords + (2 * localTour[0]))[0];
// prevY = (coords + (2 * localTour[0]))[1];
// distance = 0;
// for(i = 1; i < currentNumCities; i++)
// {
// int city = localTour[i];
// distance += (((coords + (city * 2))[1] - prevY) * ((coords + (city * 2))[1] - prevY))
// + (float)(((coords + (2 * city))[0] - prevX) * ((coords + (2 * city))[0] - prevX));
// prevX = (coords + (city * 2))[0];
// prevY = (coords + (city * 2))[1];
// }
// //fitnessMatrix[tidx][tidy] = distance;
// //printf("%d(%d, %d) %f %f \n", bid, tidx, tidy, distance, distanceBackup);
// if(distance < distanceBackup) //if new distance is lower than the old , reject.
// {
// fitnessMatrix[tidx][tidy] = distance;
// //distanceBackup = distance;
// }
// }
// __syncthreads();
// Min = INT_MAX;
// //int currentMin = 0;
// if(threadIdx.x == 0 && threadIdx.y == 0) {
// for( i = 1 ; i < currentNumCities -1 ; i++ ) {
// for ( j = i ; j < currentNumCities -1 ; j++) {
// improvement += fitnessMatrix[i][j];
// if (fitnessMatrix[i][j] < Min )
// {
// Min = fitnessMatrix[i][j];
// swapCities[0] = i;
// swapCities[1] = j;
// }
// }
// }
// temp = globalTourThreads[swapCities[0]];
// globalTourThreads[swapCities[0]] = globalTourThreads[swapCities[1]];
// globalTourThreads[swapCities[1]] = temp;
// if((swapCities[0] == 0 && swapCities[1] == 0) || counter == MAX_ITERATIONS - 1) {
// for(i = 0 ; i < currentNumCities; i++) {
// completeTour[offset + i] = globalTourThreads[i];
// }
// //printf("In termination loop for block:%d, counter:%d\n", bid, counter);
// break;
// }
// }
// __syncthreads();
// distanceBackup = fitnessMatrix[swapCities[0]][swapCities[1]];
// fitnessMatrix[swapCities[0]][swapCities[1]] = INT_MAX;
// /*
// * find the max value from fitness and swap them
// * in the localtour and update localtour backup
// * for all threads.
// */
// counter++;
// }
// /*
// * find the max value from fitness and swap only
// * those elements in completeTour
// */
// }
int *
createArrayOnGPU (unsigned int n)
{
int* tour_gpu = NULL;
if (n) {
cudaMalloc (&tour_gpu, n * sizeof (int));
CHECK_GPU ("Out of memory?");
assert (tour_gpu);
}
return tour_gpu;
}
int* createDMatOnGPU(unsigned int n)
{
int* coords_dMat = NULL;
if(n)
{
cudaMalloc(&coords_dMat,2 * n * sizeof(int));
CHECK_GPU("OUT OF MEMORY FOR DMAT");
assert(coords_dMat);
}
return coords_dMat;
}
void
freeKeysOnGPU (int* tour_gpu)
{
if (tour_gpu) cudaFree (tour_gpu);
}
void
copyKeysToGPU (unsigned int n, int* Dest_gpu, const int* Src_cpu)
{
cudaMemcpy (Dest_gpu, Src_cpu, n * sizeof (int),
cudaMemcpyHostToDevice); CHECK_GPU ("Copying keys to GPU");
}
void copyDMatToGPU(unsigned int n, int * Dest_gpu, const int* Src_cpu)
{
cudaMemcpy(Dest_gpu , Src_cpu, 2 * n * sizeof(int), cudaMemcpyHostToDevice);
CHECK_GPU ("Copying coords to GPU");
}
void
copyKeysFromGPU (unsigned int n, int* Dest_cpu, int* Src_gpu)
{
cudaMemcpy (Dest_cpu, Src_gpu, n * sizeof (int),cudaMemcpyDeviceToHost);
CHECK_GPU ("Copying keys from GPU");
}
extern "C"
void TSPSwapRun(int* tour,const int* coords)
{
int n = NUM_CITIES;
int numBlocks = n/32;
if(n % 32 != 0)
numBlocks++;
int* tour_gpu = createArrayOnGPU(n);
int* coords_gpu = createDMatOnGPU(n);
int *newTour = (int *)malloc(sizeof(int) * n);
int *coordsArray = (int *)malloc(2 * n * sizeof(int));
for(int i = 0; i < n; i++)
{
coordsArray[2 * i] = coords[2 * i];
coordsArray[(2 * i) + 1] = coords[(2 * i) + 1];
}
// printf("CUDA PATH before KERNEL\n");
for(int i = 0 ; i < n; i++)
{
if(tour[i] == 0)
printf("PANIC: zero city passed %d\n", i);
}
copyKeysToGPU (n,tour_gpu, tour);
copyDMatToGPU(n, coords_gpu, coords);
dim3 threadsPerBlock(32, 32);
TSPSwapKernel<<<numBlocks ,threadsPerBlock>>> (n, tour_gpu, coords_gpu, localIter, numBlocks);
cudaDeviceSynchronize();
copyKeysFromGPU(n, tour, tour_gpu);
// printf("CUDA PATH after KERNEL\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d-", tour[i]);
// printf("\n");
CheckValidity(tour, "After Cuda Kernel");
if(tour_gpu)
cudaFree(tour_gpu);
if(coords_gpu)
cudaFree(coords_gpu);
}
/*int main()
{
int tour[NUM_CITIES];
int* coords = (int*)malloc(2 * NUM_CITIES * sizeof(int *));
printf("\nNumber of Cities %d " , NUM_CITIES);
for(int i = 0; i < NUM_CITIES; i++)
{
tour[i] = i + 1;
coords[(2 * i)] = i;
coords[(2 * i) + 1] = i;
}
tour[1] = 3;
tour[2] = 2;
tour[17] = 19;
tour[18] = 18;
// printf("Before Kernel:\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d ", tour[i]);
// printf("\n");
TSPSwapRun((int *)tour, (const int*)coords);
// printf("After Kernel:\n");
// for(int i = 0 ; i < NUM_CITIES; i++)
// printf("%d ", tour[i]);
// printf("\n");
}*/
|
d225c6c1fb753a672a9990a07a30d8ae806b5fa8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;
//------------ Kernel de Processamento
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut)
{
int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
int VlOpen,VlHigh,VlLow,VlClose,classe;
//int classe;
if (i<=dsize) {
VlOpen = d_dados[i+1];
VlHigh = d_dados[i+2];
VlLow = d_dados[i+3];
VlClose = d_dados[i+4];
classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
//classe=(d_dados[i+1]==d_dados[i+4] ? 512: d_dados[i+1]>d_dados[i+4] ? 256:1024)+(d_dados[i+3]<d_dados[i+1] ? 1:4)+(d_dados[i+3]<d_dados[i+4] ? 2:8)+(d_dados[i+2]>d_dados[i+1] ? 16:64)+(d_dados[i+2]>d_dados[i+4] ? 32:128);
d_class[o]=d_dados[i];
//d_class[o]=12;
d_class[o+1]=classe;
}
}
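// Worked example of the class encoding above: for a bullish record with
// VlOpen < VlClose, VlLow below both and VlHigh above both, the terms are
// 1024 (open < close) + 1 (low < open) + 2 (low < close) + 16 (high > open)
// + 32 (high > close), so classe = 1075. If all four values are equal the
// result is instead 512 + 4 + 8 + 64 + 128 = 716.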
//--------------------- Funcoes de tempo --------------------------------
std::string DataHora()
{
time_t rawtime;
struct tm * timeinfo;
char buffer [20];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (buffer,20,"%F %H%M%S",timeinfo);
return buffer;
}
/* funcao de tempo */
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
double r;
if(fim >= ini)
r = ((double)(fim - ini)) / CLOCKS_PER_SEC;
else
r = ((double)( (fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
return r;
}
//------- Classif_paralela:: / std::string ---------------------------
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
char arq[256];
//char arqo[256];
//std::ifstream fin;
int colsIn=5, colsOut=2;
long lins,i, c, last_i_proc, last_c_proc;
int dsize, csize, st_dsize, st_csize, partes, st_gatilho;
//int classe,VlOpen,VlHigh,VlLow,VlClose;
int v_blocos,v_threads, streams_processados, d_deslocamento,c_deslocamento;
std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
unsigned long int t_ini;
unsigned long int t_fin;
unsigned long int t_tmp;
unsigned long int t_tmp1;
unsigned long int t_tmp2;
unsigned long int t_tmp3;
unsigned long int t_tmp4;
std::string dateStr,fn,fnl,s_threads;
/*--- define variaveis de tempo -------------*/
timeval start, end;
double delta;
dateStr=DataHora();
std::cout<<" <DataHora > = "<<dateStr<<std::endl;
/* tempo inicial */
t_ini = (unsigned long int) clock();
gettimeofday(&start, NULL); //marcador de início do processamento
/* -- define as dimensões dos vetores que serão criados em logar de matrizes */
/* -- dsize define o tamanho do vetor de dados em função do numero de linhas e colunas*/
dsize=plins*colsIn;
/* -- csize define o tamanho do vetor de classificacao em função do numero de linhas e colunas*/
csize=plins*colsOut;
/* ----- Calcula o tamanho dos streams, de acordo com o numero de partes -----------*/
partes=40;
st_dsize=0;
st_csize=0;
st_dsize=(int)floor((int)dsize/partes);
st_csize=(int)floor((int)csize/partes);
/* ----- Calcula o ponto de executar os streams, de acordo com o numero de partes, mas a cada gatilho executa 2 streams -----------*/
st_gatilho=(int)floor((int)dsize/partes);
st_gatilho*=2;
/* -- Cria os vetores que conterão os dados lidos do arquivo e a classificação */
int *h_dados;
int *h_class;
int *d_dados_0;
int *d_class_0;
int *d_dados_1;
int *d_class_1;
/*-------------------------- Define os streams ----------------------------------------*/
hipStream_t strm0, strm1;
hipStreamCreate(&strm0);
hipStreamCreate(&strm1);
std::cout<<" vai alocar memoria na GPU st_dsize= "<< st_dsize <<" st_csize= "<< st_csize<<std::endl;
/*-------------------------- Aloca os vetores no device ----------------------------------------*/
hipMalloc((void**) &d_dados_0, st_dsize * sizeof(int));
hipMalloc((void**) &d_class_0, st_csize * sizeof(int));
hipMalloc((void**) &d_dados_1, st_dsize * sizeof(int));
hipMalloc((void**) &d_class_1, st_csize * sizeof(int));
/*-------------------------- Aloca os vetores no host ----------------------------------------*/
hipHostMalloc((void**) &h_dados, dsize*sizeof(int),hipHostMallocDefault);
hipHostMalloc((void**) &h_class, csize*sizeof(int),hipHostMallocDefault);
lins=plins-0;
std::cout<<" <inicializou lns> = "<<lins<<std::endl;
/*--- pega o num de threads digitadas e calcula os blocos ------------------------- */
v_threads=nthd;
s_threads=std::string(sthd);
v_blocos=(int)ceil((float)(lins/partes)/v_threads);
std::cout<<" <Calculou v_blocos com "<< v_blocos <<" threads com "<< v_threads<<" st_gatilho com "<< st_gatilho <<" dsize="<<dsize<<std::endl;
/* ----- Abre o arquivo csv e inicia a carga dos vetores ------------------- */
strcpy(arq,nome);
ifstream fin(arq);
t_tmp1=(unsigned long int) clock();
if (fin.is_open())
{
t_tmp=(unsigned long int) clock();
/*--- carrega o arquivo no vetor host h_dados e inicializa h_class, transformando valores float em int*/
i=0;
c=0;
streams_processados=0;
c_deslocamento=0;
d_deslocamento=0;
while (fin.good())
{
getline(fin,sIndice,',');
getline(fin,sVlOpen,',');
getline(fin,sVlHigh,',');
getline(fin,sVlLow,',');
getline(fin,sVlClose,'\n');
//std::cout<<"sIndice= "<< sIndice <<"sVlOpen= "<< sVlOpen<<"sVlHigh= "<< sVlHigh<<"sVlLow= "<< sVlLow<<"sVlClose= "<< sVlClose<<std::endl;
//h_dados[i]=std::stoi(sIndice);
h_dados[i]=std::atoi(sIndice.c_str());
//h_dados[i+1]=static_cast<int>(std::stof(sVlOpen,NULL)*100);
h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
h_class[c]=0;
h_class[c+1]=0;
//std::cout<<"Indice= "<< h_dados[i] <<"VlOpen= "<< h_dados[i+1]<<"VlHigh= "<< h_dados[i+2]<<"sVlLow= "<< h_dados[i+3]<<"VlClose= "<< h_dados[i+4]<<std::endl;
/*--- Se atingiu o ponto de transferir os dados (st_gatilho) ou atingiu o último índice de dados -----------
---- st_dsize-colsOut significa o último registro do stream, st_dsize é o início do próximo stream --------
-------------------- copia os vetores e dispara o kernel -------------------------------------------------*/
if ((i>0) && (i<dsize)) {
if ((i % st_gatilho) == 0)
{
c_deslocamento=streams_processados*st_csize;
d_deslocamento=streams_processados*st_dsize;
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
hipMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),hipMemcpyHostToDevice, strm0);
hipMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),hipMemcpyHostToDevice, strm0);
/*--- invoca o kernel de classificação ---*/
hipLaunchKernelGGL(( Classif), dim3(v_blocos),dim3(v_threads),0, strm0, d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
hipMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),hipMemcpyDeviceToHost, strm0);
streams_processados++;
c_deslocamento=streams_processados*st_csize;
d_deslocamento=streams_processados*st_dsize;
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
hipMemcpyAsync(d_dados_1,h_dados+d_deslocamento,st_dsize * sizeof(int),hipMemcpyHostToDevice, strm1);
hipMemcpyAsync(d_class_1,h_class+c_deslocamento,st_csize * sizeof(int),hipMemcpyHostToDevice, strm1);
/*--- invoca o kernel de classificação ---*/
hipLaunchKernelGGL(( Classif), dim3(v_blocos),dim3(v_threads),0, strm1, d_dados_1, d_class_1, st_dsize, colsIn, colsOut);
hipMemcpyAsync(h_class+c_deslocamento,d_class_1,st_csize * sizeof(int),hipMemcpyDeviceToHost, strm1);
streams_processados++;
last_i_proc=i;
last_c_proc=c;
}
} else {
if (i == dsize) {
c_deslocamento=csize-last_c_proc; //((streams_processados*st_csize)+st_csize);
d_deslocamento=dsize-last_i_proc; //((streams_processados*st_dsize)+st_dsize);
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
hipMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),hipMemcpyHostToDevice, strm0);
hipMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),hipMemcpyHostToDevice, strm0);
/*--- invoca o kernel de classificação ---*/
hipLaunchKernelGGL(( Classif), dim3(v_blocos),dim3(v_threads),0, strm0, d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
hipMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),hipMemcpyDeviceToHost, strm0);
}
}
i+=colsIn;
c+=colsOut;
}
std::cout<<" <Carregou h_dados com "<< i <<" posies e h_class com "<< c << " posicoes"<<std::endl;
t_tmp2=(unsigned long int) clock();
std::cout<<" <Calculou v_blocos com "<< v_blocos <<" lins=" << lins << " threads com "<< v_threads <<std::endl;
std::cout<<" <dsize "<< dsize << " colsIn="<<colsIn<<" colsOut="<< colsOut<<std::endl;
t_tmp3=(unsigned long int) clock();
hipStreamSynchronize(strm0);
hipStreamSynchronize(strm1);
t_tmp4=(unsigned long int) clock();
//std::cout<<" <Sincronizou -------------------"<<std::endl;
fnl="log/Classif_StreamG7-T"+ s_threads +dateStr+".log.txt";
//arqo=fnl.c_str();
std::ofstream mylog (fnl.c_str());
//std::ofstream mylog (arqo);
mylog<<"Processado em "<< dateStr <<std::endl;
mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads, com "<< partes <<" partes"<<std::endl;
mylog<<"Tempo total de classificaao (ler CSV e classificar via stream/kernel)= "<< calcula_tempo(t_tmp1, t_tmp2) <<std::endl;
//mylog<<"Tempo total de cpia host >> device = "<< calcula_tempo(t_tmp1, t_tmp2) <<std::endl;
mylog<<"Tempo total de Stream Synchronize >> host = "<< calcula_tempo(t_tmp3, t_tmp4) <<std::endl;
/*---- fecha o arquivo de entrada de registros a classificar*/
fin.close();
/*--- cria o nome do arquivo csv de saída com as classificações ----*/
//fn="/home/UFF/GPU/Trabalho/Dados/Classif_Kernel"+dateStr+".csv";
fn="csv/Classif_StreamT"+ s_threads +dateStr+".csv";
//std::cout<<std::endl<<fn <<std::endl;
t_tmp=(unsigned long int) clock();
/*--- abre o csv de saída ---*/
std::ofstream myfile (fn.c_str());
myfile<<"Indice,IdClasse"<<std::endl;
/*--- exporta o conteúdo do vetor h_class ---*/
for (i=0; i<csize; i+=colsOut)
{
myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
}
myfile.close();
mylog<<"Tempo para exportar classificaao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
t_tmp=(unsigned long int) clock();
/*------------- libera memoria ------------------------*/
hipFree(d_dados_0);
hipFree(d_class_0);
hipFree(d_dados_1);
hipFree(d_class_1);
hipHostFree(h_dados);
hipHostFree(h_class);
mylog<<"Tempo para liberar memoria GPU= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
// desaloca a matriz << no Thrust a desalocação dos vetores é transparente ---------------
//mylog<<"Tempo para free matriz = "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/* tempo final */
t_fin = (unsigned long int) clock();
mylog<<"Total de registros classificados= "<< lins <<std::endl;
mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
gettimeofday(&end, NULL);
delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
mylog.close();
std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
}
else
{
std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
}
}
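// Classif_GPU, in outline: the CSV is split into `partes` chunks; every time the
// parser has filled two more chunks of h_dados (the st_gatilho trigger) they are
// pushed to the two streams (strm0/strm1) as async H2D copy -> Classif kernel ->
// async D2H copy, so file parsing on the host overlaps with classification on
// the GPU. The pinned host buffers from hipHostMalloc are what allow the async
// copies to actually overlap with kernel execution.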
//---------------------------------------------------------------------------
int main(int argc, char * argv[])
{
long nlin=0;
int nthd=0;
if (argc < 4){
std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
abort();
}
// File
std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
//nlin=std::stol(argv[2]);
nlin=std::atol(argv[2]);
nthd=std::atoi(argv[3]);
/* processa a classificação */
std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
Classif_GPU(argv[1],nlin,nthd,argv[3]);
}
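/* Run sketch (the CSV name below is a placeholder): the program expects
       ./classif dados.csv <num_records> <threads_per_block>
   where the CSV holds one "indice,open,high,low,close" record per line; it
   writes the classification to csv/Classif_Stream*.csv and a timing log to
   log/Classif_Stream*.log.txt, so the csv/ and log/ directories must already
   exist. */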
|
d225c6c1fb753a672a9990a07a30d8ae806b5fa8.cu
|
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;
//------------ Kernel de Processamento
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut)
{
int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
int VlOpen,VlHigh,VlLow,VlClose,classe;
//int classe;
if (i + colsIn <= dsize) { // guard: a record needs colsIn consecutive input values inside this chunk
VlOpen = d_dados[i+1];
VlHigh = d_dados[i+2];
VlLow = d_dados[i+3];
VlClose = d_dados[i+4];
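/* The class id packs the candlestick shape into a bit field: 256/512/1024 encode whether the
   open is above, equal to or below the close, 1/4 and 2/8 encode how the low compares to the
   open and the close, and 16/64 and 32/128 do the same for the high, so each combination of
   comparisons maps to a distinct integer. */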
classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
//classe=(d_dados[i+1]==d_dados[i+4] ? 512: d_dados[i+1]>d_dados[i+4] ? 256:1024)+(d_dados[i+3]<d_dados[i+1] ? 1:4)+(d_dados[i+3]<d_dados[i+4] ? 2:8)+(d_dados[i+2]>d_dados[i+1] ? 16:64)+(d_dados[i+2]>d_dados[i+4] ? 32:128);
d_class[o]=d_dados[i];
//d_class[o]=12;
d_class[o+1]=classe;
}
}
//--------------------- Funcoes de tempo --------------------------------
std::string DataHora()
{
time_t rawtime;
struct tm * timeinfo;
char buffer [20];
time ( &rawtime );
timeinfo = localtime ( &rawtime );
strftime (buffer,20,"%F %H%M%S",timeinfo);
return buffer;
}
/* funcao de tempo */
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
double r;
if(fim >= ini)
r = ((double)(fim - ini)) / CLOCKS_PER_SEC;
else
r = ((double)( (fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
return r;
}
//------- Classif_paralela:: / std::string ---------------------------
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
char arq[256];
//char arqo[256];
//std::ifstream fin;
int colsIn=5, colsOut=2;
long lins,i, c, last_i_proc, last_c_proc;
int dsize, csize, st_dsize, st_csize, partes, st_gatilho;
//int classe,VlOpen,VlHigh,VlLow,VlClose;
int v_blocos,v_threads, streams_processados, d_deslocamento,c_deslocamento;
std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
unsigned long int t_ini;
unsigned long int t_fin;
unsigned long int t_tmp;
unsigned long int t_tmp1;
unsigned long int t_tmp2;
unsigned long int t_tmp3;
unsigned long int t_tmp4;
std::string dateStr,fn,fnl,s_threads;
/*--- define variaveis de tempo -------------*/
timeval start, end;
double delta;
dateStr=DataHora();
std::cout<<" <DataHora > = "<<dateStr<<std::endl;
/* tempo inicial */
t_ini = (unsigned long int) clock();
gettimeofday(&start, NULL); //marcador de início do processamento
/* -- define as dimensões dos vetores que serão criados em logar de matrizes */
/* -- dsize define o tamanho do vetor de dados em função do numero de linhas e colunas*/
dsize=plins*colsIn;
/* -- csize define o tamanho do vetor de classificacao em função do numero de linhas e colunas*/
csize=plins*colsOut;
/* ----- Calcula o tamanho dos streams, de acordo com o numero de partes -----------*/
partes=40;
st_dsize=0;
st_csize=0;
st_dsize=(int)floor((int)dsize/partes);
st_csize=(int)floor((int)csize/partes);
/* ----- Calcula o ponto de executar os streams, de acordo com o numero de partes, mas a cada gatilho executa 2 streams -----------*/
st_gatilho=(int)floor((int)dsize/partes);
st_gatilho*=2;
/* -- Cria os vetores que conterão os dados lidos do arquivo e a classificação */
int *h_dados;
int *h_class;
int *d_dados_0;
int *d_class_0;
int *d_dados_1;
int *d_class_1;
/*-------------------------- Define os streams ----------------------------------------*/
cudaStream_t strm0, strm1;
cudaStreamCreate(&strm0);
cudaStreamCreate(&strm1);
std::cout<<" vai alocar memoria na GPU st_dsize= "<< st_dsize <<" st_csize= "<< st_csize<<std::endl;
/*-------------------------- Aloca os vetores no device ----------------------------------------*/
cudaMalloc((void**) &d_dados_0, st_dsize * sizeof(int));
cudaMalloc((void**) &d_class_0, st_csize * sizeof(int));
cudaMalloc((void**) &d_dados_1, st_dsize * sizeof(int));
cudaMalloc((void**) &d_class_1, st_csize * sizeof(int));
/*-------------------------- Aloca os vetores no host ----------------------------------------*/
cudaHostAlloc((void**) &h_dados, dsize*sizeof(int),cudaHostAllocDefault);
cudaHostAlloc((void**) &h_class, csize*sizeof(int),cudaHostAllocDefault);
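/* Pinned (page-locked) host buffers are deliberate here: cudaMemcpyAsync only overlaps with
   kernels and other streams when the host memory is page-locked, which is why cudaHostAlloc
   is used instead of plain malloc. */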
lins=plins-0;
std::cout<<" <inicializou lns> = "<<lins<<std::endl;
/*--- pega o num de threads digitadas e calcula os blocos ------------------------- */
v_threads=nthd;
s_threads=std::string(sthd);
v_blocos=(int)ceil((float)(lins/partes)/v_threads);
std::cout<<" <Calculou v_blocos com "<< v_blocos <<" threads com "<< v_threads<<" st_gatilho com "<< st_gatilho <<" dsize="<<dsize<<std::endl;
/* ----- Abre o arquivo csv e inicia a carga dos vetores ------------------- */
strcpy(arq,nome);
ifstream fin(arq);
t_tmp1=(unsigned long int) clock();
if (fin.is_open())
{
t_tmp=(unsigned long int) clock();
/*--- carrega o arquivo no vetor host h_dados e inicializa h_class, transformando valores float em int*/
i=0;
c=0;
streams_processados=0;
c_deslocamento=0;
d_deslocamento=0;
while (fin.good())
{
getline(fin,sIndice,',');
getline(fin,sVlOpen,',');
getline(fin,sVlHigh,',');
getline(fin,sVlLow,',');
getline(fin,sVlClose,'\n');
//std::cout<<"sIndice= "<< sIndice <<"sVlOpen= "<< sVlOpen<<"sVlHigh= "<< sVlHigh<<"sVlLow= "<< sVlLow<<"sVlClose= "<< sVlClose<<std::endl;
//h_dados[i]=std::stoi(sIndice);
h_dados[i]=std::atoi(sIndice.c_str());
//h_dados[i+1]=static_cast<int>(std::stof(sVlOpen,NULL)*100);
h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
h_class[c]=0;
h_class[c+1]=0;
//std::cout<<"Indice= "<< h_dados[i] <<"VlOpen= "<< h_dados[i+1]<<"VlHigh= "<< h_dados[i+2]<<"sVlLow= "<< h_dados[i+3]<<"VlClose= "<< h_dados[i+4]<<std::endl;
/*--- Se atingiu o ponto de transferir os dados (st_gatilho) ou atingiu o último indice de dados -----------
---- st_dsize-colsOut significa o último registro do stream, st_dsize é o inicio do próximo stream --------
-------------------- copia os vetores e dispara o kernel -------------------------------------------------*/
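/* In effect this is a double-buffered pipeline: every st_gatilho values parsed, the two chunks
   read since the last trigger are handed to strm0 and strm1 (each with its own device buffers),
   so their host-to-device copies, kernel launches and device-to-host copies can overlap with
   the CPU parsing the next part of the CSV. */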
if ((i>0) && (i<dsize)) {
if ((i % st_gatilho) == 0)
{
c_deslocamento=streams_processados*st_csize;
d_deslocamento=streams_processados*st_dsize;
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
cudaMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm0);
cudaMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm0);
/*--- invoca o kernel de classificação ---*/
Classif<<<v_blocos,v_threads,0, strm0>>>(d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
cudaMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm0);
streams_processados++;
c_deslocamento=streams_processados*st_csize;
d_deslocamento=streams_processados*st_dsize;
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
cudaMemcpyAsync(d_dados_1,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm1);
cudaMemcpyAsync(d_class_1,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm1);
/*--- invoca o kernel de classificação ---*/
Classif<<<v_blocos,v_threads,0, strm1>>>(d_dados_1, d_class_1, st_dsize, colsIn, colsOut);
cudaMemcpyAsync(h_class+c_deslocamento,d_class_1,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm1);
streams_processados++;
last_i_proc=i;
last_c_proc=c;
}
} else {
if (i == dsize) {
c_deslocamento=csize-last_c_proc; //((streams_processados*st_csize)+st_csize);
d_deslocamento=dsize-last_i_proc; //((streams_processados*st_dsize)+st_dsize);
//std::cout<<"i= "<< i <<" st_dsize= "<< st_dsize<<" d_deslocamento= "<< d_deslocamento<<" c_deslocamento= "<<c_deslocamento<<" streams_processados= "<< streams_processados<<std::endl;
cudaMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm0);
cudaMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm0);
/*--- invoca o kernel de classificação ---*/
Classif<<<v_blocos,v_threads,0, strm0>>>(d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
cudaMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm0);
}
}
i+=colsIn;
c+=colsOut;
}
std::cout<<" <Carregou h_dados com "<< i <<" posições e h_class com "<< c << " posicoes"<<std::endl;
t_tmp2=(unsigned long int) clock();
std::cout<<" <Calculou v_blocos com "<< v_blocos <<" lins=" << lins << " threads com "<< v_threads <<std::endl;
std::cout<<" <dsize "<< dsize << " colsIn="<<colsIn<<" colsOut="<< colsOut<<std::endl;
t_tmp3=(unsigned long int) clock();
cudaStreamSynchronize(strm0);
cudaStreamSynchronize(strm1);
t_tmp4=(unsigned long int) clock();
//std::cout<<" <Sincronizou -------------------"<<std::endl;
fnl="log/Classif_StreamG7-T"+ s_threads +dateStr+".log.txt";
//arqo=fnl.c_str();
std::ofstream mylog (fnl.c_str());
//std::ofstream mylog (arqo);
mylog<<"Processado em "<< dateStr <<std::endl;
mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads, com "<< partes <<" partes"<<std::endl;
mylog<<"Tempo total de classificaçao (ler CSV e classificar via stream/kernel)= "<< calcula_tempo(t_tmp1, t_tmp2) <<std::endl;
//mylog<<"Tempo total de cópia host >> device = "<< calcula_tempo(t_tmp1, t_tmp2) <<std::endl;
mylog<<"Tempo total de Stream Synchronize >> host = "<< calcula_tempo(t_tmp3, t_tmp4) <<std::endl;
/*---- fecha o arquivo de entrada de registros a classificar*/
fin.close();
/*--- cria o nome do arquivo csv de saída com as classificações ----*/
//fn="/home/UFF/GPU/Trabalho/Dados/Classif_Kernel"+dateStr+".csv";
fn="csv/Classif_StreamT"+ s_threads +dateStr+".csv";
//std::cout<<std::endl<<fn <<std::endl;
t_tmp=(unsigned long int) clock();
/*--- abre o csv de saída ---*/
std::ofstream myfile (fn.c_str());
myfile<<"Indice,IdClasse"<<std::endl;
/*--- exporta o conteúdo do vetor h_class ---*/
for (i=0; i<csize; i+=colsOut)
{
myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
}
myfile.close();
mylog<<"Tempo para exportar classificaçao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
t_tmp=(unsigned long int) clock();
/*------------- libera memoria ------------------------*/
cudaFree(d_dados_0);
cudaFree(d_class_0);
cudaFree(d_dados_1);
cudaFree(d_class_1);
cudaFreeHost(h_dados);
cudaFreeHost(h_class);
mylog<<"Tempo para liberar memoria GPU= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
// desaloca a matriz << no Thtrust a desalocação dos vetores é transparente ---------------
//mylog<<"Tempo para free matriz = "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;
/* tempo final */
t_fin = (unsigned long int) clock();
mylog<<"Total de registros classificados= "<< lins <<std::endl;
mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
gettimeofday(&end, NULL);
delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
mylog.close();
std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
}
else
{
std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
}
}
//---------------------------------------------------------------------------
int main(int argc, char * argv[])
{
long nlin=0;
int nthd=0;
if (argc < 4){
std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
abort();
}
// File
std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
//nlin=std::stol(argv[2]);
nlin=std::atol(argv[2]);
nthd=std::atoi(argv[3]);
/* processa a classificaçao */
std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
Classif_GPU(argv[1],nlin,nthd,argv[3]);
}
|
b6e52dc7cd1544d3300ba3437d0129c3a21e6fc4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "xMaxDeltaIntegralFracKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *intData = NULL;
hipMalloc(&intData, XSIZE*YSIZE);
const int intDataStrideChannel = 1;
float *tmpArray = NULL;
hipMalloc(&tmpArray, XSIZE*YSIZE);
const int batchSize = 1;
const int nInputPlane = 1;
const int nWindows = 1;
const int h = 1;
const int w = 1;
const float *xMax = NULL;
hipMalloc(&xMax, XSIZE*YSIZE);
const float *yMin = NULL;
hipMalloc(&yMin, XSIZE*YSIZE);
const float *yMax = NULL;
hipMalloc(&yMax, XSIZE*YSIZE);
const float *inData = NULL;
hipMalloc(&inData, XSIZE*YSIZE);
const int inDataStrideRow = 1;
const int inDataStrideChannel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
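// iXSIZE and iYSIZE are the problem dimensions rounded up to the next multiple of BLOCKX/BLOCKY,
// so the grid computed below fully covers the XSIZE x YSIZE domain for this block shape.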
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
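// hipFree(0) forces lazy runtime/context initialization, and the first launch plus the
// 10-iteration loop act as a warm-up, so the timed 1000-iteration loop below measures
// steady-state kernel time rather than one-time setup costs.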
hipFree(0);hipLaunchKernelGGL((
xMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
xMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
xMaxDeltaIntegralFracKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
b6e52dc7cd1544d3300ba3437d0129c3a21e6fc4.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "xMaxDeltaIntegralFracKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *intData = NULL;
cudaMalloc(&intData, XSIZE*YSIZE);
const int intDataStrideChannel = 1;
float *tmpArray = NULL;
cudaMalloc(&tmpArray, XSIZE*YSIZE);
const int batchSize = 1;
const int nInputPlane = 1;
const int nWindows = 1;
const int h = 1;
const int w = 1;
const float *xMax = NULL;
cudaMalloc(&xMax, XSIZE*YSIZE);
const float *yMin = NULL;
cudaMalloc(&yMin, XSIZE*YSIZE);
const float *yMax = NULL;
cudaMalloc(&yMax, XSIZE*YSIZE);
const float *inData = NULL;
cudaMalloc(&inData, XSIZE*YSIZE);
const int inDataStrideRow = 1;
const int inDataStrideChannel = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
xMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
xMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
xMaxDeltaIntegralFracKernel<<<gridBlock,threadBlock>>>(intData,intDataStrideChannel,tmpArray,batchSize,nInputPlane,nWindows,h,w,xMax,yMin,yMax,inData,inDataStrideRow,inDataStrideChannel);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
d67756c473a3c126e5f6df23e6664528fddb9d6f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "CUDABackground.h"
#include "FileReader.h"
#include <atomic>
#include <iostream>
#include <pplinterface.h>
using namespace std;
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
hipError_t findFrequents(int**main, unsigned int *counts, int mainSize, int countsSize);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void findFrequents(int**mainArray, unsigned int *counts)
{
int i = threadIdx.x;
//int end = mainArray[i][0] + 1;
printf("%d:\t%d\n", threadIdx.x, mainArray[i][1]);
int * startAddy = mainArray[i];
for(int j = 1; j <= 1; j++)
{
//atomicAdd(counts + mainArray[i][j], 1);
}
}
int main()
{
CUDABackground cuda = CUDABackground();
FileReader f = FileReader("data.txt");
int cudaCores = cuda.calculateCores();
unsigned int * frequency = (unsigned int *)malloc(sizeof(unsigned int) * f.maxNumber);
for (int i = 0; i < f.maxNumber; i++)
{
frequency[i] = 0;
}
for (int i = 0; i < 50; i++)
{
printf("\n%d: ", i);
for (int j = 0; j < f.master[i][0];j++)
printf("%d,", f.master[i][j]);
}
cout<<frequency[0];
1 + 1;
hipError_t cudaStatus = findFrequents(f.master, frequency, f.count, f.maxNumber);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
int test2 = frequency[0];
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
//hipError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t findFrequents(int* main[], unsigned int *counts, int mainSize, int countsSize)
{
int **devMain = 0;
unsigned int * devCounts = 0;
hipError_t cudaStatus;
cudaStatus = hipMalloc((void***)&devMain, mainSize * sizeof(int*));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
for (int i = 0; i < mainSize; i++)
{
int* temp;
hipMalloc((void**)&temp, sizeof(int)* (main[i][0] + 1)); // allocate for 1 int in each int pointer
hipMemcpy(temp, main[i], sizeof(int) * (main[i][0] + 1), hipMemcpyHostToDevice); // copy data
hipMemcpy(devMain + i, &temp, sizeof(int*), hipMemcpyHostToDevice);
}
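// devMain is a device-side array of device pointers (a jagged 2-D layout): each row of main is
// copied into its own device allocation, and that allocation's address is written into the
// matching slot of devMain so the kernel can index mainArray[i][j] directly.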
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// devMain was already allocated and populated with the per-row device pointers above.
cudaStatus = hipMalloc((void**)&devCounts, countsSize * sizeof(unsigned int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMemcpy(devCounts, counts, countsSize * sizeof(unsigned int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
findFrequents << <1, 500 >> > (devMain, devCounts);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(counts, devCounts, countsSize * sizeof(unsigned int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(devMain);
hipFree(devCounts);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!");
goto Error;
}
Error:
hipFree(dev_c);
hipFree(dev_a);
hipFree(dev_b);
return cudaStatus;
}
|
d67756c473a3c126e5f6df23e6664528fddb9d6f.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include "CUDABackground.h"
#include "FileReader.h"
#include <atomic>
#include <iostream>
#include <pplinterface.h>
using namespace std;
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
cudaError_t findFrequents(int**main, unsigned int *counts, int mainSize, int countsSize);
__global__ void addKernel(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = a[i] + b[i];
}
__global__ void findFrequents(int**mainArray, unsigned int *counts)
{
int i = threadIdx.x;
//int end = mainArray[i][0] + 1;
printf("%d:\t%d\n", threadIdx.x, mainArray[i][1]);
int * startAddy = mainArray[i];
for(int j = 1; j <= 1; j++)
{
//atomicAdd(counts + mainArray[i][j], 1);
}
}
int main()
{
CUDABackground cuda = CUDABackground();
FileReader f = FileReader("data.txt");
int cudaCores = cuda.calculateCores();
unsigned int * frequency = (unsigned int *)malloc(sizeof(unsigned int) * f.maxNumber);
for (int i = 0; i < f.maxNumber; i++)
{
frequency[i] = 0;
}
for (int i = 0; i < 50; i++)
{
printf("\n%d: ", i);
for (int j = 0; j < f.master[i][0];j++)
printf("%d,", f.master[i][j]);
}
cout<<frequency[0];
1 + 1;
cudaError_t cudaStatus = findFrequents(f.master, frequency, f.count, f.maxNumber);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
int test2 = frequency[0];
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
//cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t findFrequents(int* main[], unsigned int *counts, int mainSize, int countsSize)
{
int **devMain = 0;
unsigned int * devCounts = 0;
cudaError_t cudaStatus;
cudaStatus = cudaMalloc((void***)&devMain, mainSize * sizeof(int*));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
for (int i = 0; i < mainSize; i++)
{
int* temp;
cudaMalloc((void**)&temp, sizeof(int)* (main[i][0] + 1)); // allocate for 1 int in each int pointer
cudaMemcpy(temp, main[i], sizeof(int) * (main[i][0] + 1), cudaMemcpyHostToDevice); // copy data
cudaMemcpy(devMain + i, &temp, sizeof(int*), cudaMemcpyHostToDevice);
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// devMain was already allocated and populated with the per-row device pointers above.
cudaStatus = cudaMalloc((void**)&devCounts, countsSize * sizeof(unsigned int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMemcpy(devCounts, counts, countsSize * sizeof(unsigned int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
findFrequents << <1, 500 >> > (devMain, devCounts);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(counts, devCounts, countsSize * sizeof(unsigned int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(devMain);
cudaFree(devCounts);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
05ff48b9dc77111e1c4329c97ea45d7fa897047c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
@author ysbecca Rebecca Young [sc16rsmy]
@description A Sobel image filter on GPU
COMP5811 Parallel & Concurrent Programming
University of Leeds 2016-2017
Prof. David Duke
*/
#include <stdio.h>
#define CHECK(e) { int res = (e); if (res) printf("CUDA ERROR %d\n", res); }
#define THRESHOLD 10000
#define WIDTH 640
#define HEIGHT 480
// Create textures for the images
texture<char> tex_input;
texture<char> tex_output;
#include "textures.h"
// Image data structure.
struct Image {
int width;
int height;
unsigned char *img;
unsigned char *dev_img;
};
int main(int argc, char **argv)
{
Image source;
if (argc != 2)
{
printf("Usage: exec filename\n");
exit(1);
}
char *fname = argv[1];
FILE *src;
if (!(src = fopen(fname, "rb")))
{
printf("Couldn't open file %s for reading.\n", fname);
exit(1);
}
char p,s;
fscanf(src, "%c%c\n", &p, &s);
if (p != 'P' || s != '6')
{
printf("Not a valid PPM file (%c %c)\n", p, s);
exit(1);
}
fscanf(src, "%d %d\n", &source.width, &source.height);
int ignored;
fscanf(src, "%d\n", &ignored);
int pixels = source.width * source.height;
source.img = (unsigned char *)malloc(pixels*3);
if (fread(source.img, sizeof(unsigned char), pixels*3, src) != pixels*3)
{
printf("Error reading file.\n");
exit(1);
}
fclose(src);
Image grayScale;
grayScale.width = source.width;
grayScale.height = source.height;
printf("Width %d, height %d\n", source.width, source.height);
grayScale.img = (unsigned char *)malloc(pixels);
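// Collapse RGB to grayscale with the standard Rec.601 luma weights (0.2989 R + 0.5870 G + 0.1140 B).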
for (int i = 0; i < pixels; i++)
{
unsigned int r = source.img[i*3];
unsigned int g = source.img[i*3 + 1];
unsigned int b = source.img[i*3 + 2];
grayScale.img[i] = 0.2989*r + 0.5870*g + 0.1140*b;
}
// The structure on CPU for the filtered image to be saved into.
Image filtered;
filtered.width = source.width;
filtered.height = source.height;
filtered.img = (unsigned char *)malloc(pixels);
// Allocate memory for the images on the GPU
CHECK( hipMalloc( (void**)&filtered.dev_img, pixels ) );
CHECK( hipMalloc( (void**)&grayScale.dev_img, pixels ) );
// Copy the output array from host to device
CHECK( hipMemcpy( filtered.dev_img, filtered.img, pixels, hipMemcpyHostToDevice) );
CHECK( hipMemcpy( grayScale.dev_img, grayScale.img, pixels, hipMemcpyHostToDevice) );
// Bind textures
CHECK( hipBindTexture(NULL, tex_input, grayScale.dev_img, pixels) );
// One thread per pixel; assume image size /32
dim3 grid(source.width, source.height);
// Start the timer.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
// Do work on GPU.
hipLaunchKernelGGL(( apply_sobel_textures), dim3(grid),dim3(1), 0, 0, filtered.dev_img );
// Stop time.
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
// Copy the results back from device to host image
CHECK( hipMemcpy( filtered.img, filtered.dev_img, pixels, hipMemcpyDeviceToHost) );
hipUnbindTexture(tex_input);
// Display the elapsed time
float t;
hipEventElapsedTime(&t, start, stop);
printf("GPU took %f to complete task.\n", t);
// Writes the filtered image back to my_sobel.pgm
FILE *out;
if (!(out = fopen("my_sobel.pgm", "wb")))
{
printf("Couldn't open file for output.\n");
exit(1);
}
fprintf(out, "P5\n%d %d\n255\n", filtered.width, filtered.height);
if (fwrite(filtered.img, sizeof(unsigned char), pixels, out) != pixels)
{
printf("Error writing file.\n");
exit(1);
}
fclose(out);
// Tidy-up everything
free(grayScale.img);
free(source.img);
free(filtered.img);
hipEventDestroy(start);
hipEventDestroy(stop);
exit(0);
}
|
05ff48b9dc77111e1c4329c97ea45d7fa897047c.cu
|
/*
@author ysbecca Rebecca Young [sc16rsmy]
@description A Sobel image filter on GPU
COMP5811 Parallel & Concurrent Programming
University of Leeds 2016-2017
Prof. David Duke
*/
#include <stdio.h>
#define CHECK(e) { int res = (e); if (res) printf("CUDA ERROR %d\n", res); }
#define THRESHOLD 10000
#define WIDTH 640
#define HEIGHT 480
// Create textures for the images
texture<char> tex_input;
texture<char> tex_output;
#include "textures.h"
// Image data structure.
struct Image {
int width;
int height;
unsigned char *img;
unsigned char *dev_img;
};
int main(int argc, char **argv)
{
Image source;
if (argc != 2)
{
printf("Usage: exec filename\n");
exit(1);
}
char *fname = argv[1];
FILE *src;
if (!(src = fopen(fname, "rb")))
{
printf("Couldn't open file %s for reading.\n", fname);
exit(1);
}
char p,s;
fscanf(src, "%c%c\n", &p, &s);
if (p != 'P' || s != '6')
{
printf("Not a valid PPM file (%c %c)\n", p, s);
exit(1);
}
fscanf(src, "%d %d\n", &source.width, &source.height);
int ignored;
fscanf(src, "%d\n", &ignored);
int pixels = source.width * source.height;
source.img = (unsigned char *)malloc(pixels*3);
if (fread(source.img, sizeof(unsigned char), pixels*3, src) != pixels*3)
{
printf("Error reading file.\n");
exit(1);
}
fclose(src);
Image grayScale;
grayScale.width = source.width;
grayScale.height = source.height;
printf("Width %d, height %d\n", source.width, source.height);
grayScale.img = (unsigned char *)malloc(pixels);
for (int i = 0; i < pixels; i++)
{
unsigned int r = source.img[i*3];
unsigned int g = source.img[i*3 + 1];
unsigned int b = source.img[i*3 + 2];
grayScale.img[i] = 0.2989*r + 0.5870*g + 0.1140*b;
}
// The structure on CPU for the filtered image to be saved into.
Image filtered;
filtered.width = source.width;
filtered.height = source.height;
filtered.img = (unsigned char *)malloc(pixels);
// Allocate memory for the images on the GPU
CHECK( cudaMalloc( (void**)&filtered.dev_img, pixels ) );
CHECK( cudaMalloc( (void**)&grayScale.dev_img, pixels ) );
// Copy the output array from host to device
CHECK( cudaMemcpy( filtered.dev_img, filtered.img, pixels, cudaMemcpyHostToDevice) );
CHECK( cudaMemcpy( grayScale.dev_img, grayScale.img, pixels, cudaMemcpyHostToDevice) );
// Bind textures
CHECK( cudaBindTexture(NULL, tex_input, grayScale.dev_img, pixels) );
// One thread per pixel; assume image size /32
dim3 grid(source.width, source.height);
// Start the timer.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Do work on GPU.
apply_sobel_textures<<<grid,1>>>( filtered.dev_img );
// Stop time.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// Copy the results back from device to host image
CHECK( cudaMemcpy( filtered.img, filtered.dev_img, pixels, cudaMemcpyDeviceToHost) );
cudaUnbindTexture(tex_input);
// Display the elapsed time
float t;
cudaEventElapsedTime(&t, start, stop);
printf("GPU took %f to complete task.\n", t);
// Writes the filtered image back to my_sobel.pgm
FILE *out;
if (!(out = fopen("my_sobel.pgm", "wb")))
{
printf("Couldn't open file for output.\n");
exit(1);
}
fprintf(out, "P5\n%d %d\n255\n", filtered.width, filtered.height);
if (fwrite(filtered.img, sizeof(unsigned char), pixels, out) != pixels)
{
printf("Error writing file.\n");
exit(1);
}
fclose(out);
// Tidy-up everything
free(grayScale.img);
free(source.img);
free(filtered.img);
cudaEventDestroy(start);
cudaEventDestroy(stop);
exit(0);
}
|
c219fb1b1bafe0404b8d42d41e8e8a473d7425b7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions need not be multiples of BLOCK_SIZE: the grid is rounded up and the kernel guards out-of-range threads
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
hipMalloc(&d_A.elements, size);
hipMemcpy(d_A.elements, A.elements, size,
hipMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
hipMalloc(&d_B.elements, size);
hipMemcpy(d_B.elements, B.elements, size,
hipMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
hipMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y);
hipLaunchKernelGGL(( MatMulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_A, d_B, d_C);
printf("Hello2");
// Read C from device memory
hipMemcpy(C.elements, d_C.elements, size,
hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= C.height || col >= C.width) return; // skip threads that fall outside the output matrix
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
int main()
{
float A_data[4] = {1,2,3,4};
float B_data[4] = {5,6,7,8};
Matrix A,B;
A.height = 2;
A.width = 2;
A.elements = A_data;
B.height = 2;
B.width = 2;
B.elements = B_data;
Matrix C;
C.height = 2;
C.width = 2;
float C_data[4] = {0, 0, 0, 0}; // host storage for the result
C.elements = C_data;
MatMul(A,B,C);
printf("%f %f %f %f", C.elements[0], C.elements[1], C.elements[2], C.elements[3]);
}
|
c219fb1b1bafe0404b8d42d41e8e8a473d7425b7.cu
|
#include <stdio.h>
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float* elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions need not be multiples of BLOCK_SIZE: the grid is rounded up and the kernel guards out-of-range threads
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = A.width; d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x, (A.height + dimBlock.y - 1) / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
printf("Hello2");
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
float Cvalue = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (row >= C.height || col >= C.width) return; // skip threads that fall outside the output matrix
for (int e = 0; e < A.width; ++e)
Cvalue += A.elements[row * A.width + e] * B.elements[e * B.width + col];
C.elements[row * C.width + col] = Cvalue;
}
int main()
{
float A_data[4] = {1,2,3,4};
float B_data[4] = {5,6,7,8};
Matrix A,B;
A.height = 2;
A.width = 2;
A.elements = A_data;
B.height = 2;
B.width = 2;
B.elements = B_data;
Matrix C;
C.height = 2;
C.width = 2;
float C_data[4] = {0, 0, 0, 0}; // host storage for the result
C.elements = C_data;
MatMul(A,B,C);
printf("%f %f %f %f", C.elements[0], C.elements[1], C.elements[2], C.elements[3]);
}
|
8b48e781165e69502591df76bf6c8cb69b9d938a.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* Created on: Sep 21, 2018
* Author: Akila, Eranga, Eminda, Ruwan
**/
#include "cudaRHS.cuh"
enum VAR_CU {U_ALPHA=0,U_CHI,U_K,U_GT0,U_GT1,U_GT2,U_BETA0,U_BETA1,U_BETA2,U_B0,U_B1,U_B2,U_SYMGT0,U_SYMGT1,U_SYMGT2,U_SYMGT3,U_SYMGT4,U_SYMGT5,U_SYMAT0,U_SYMAT1,U_SYMAT2,U_SYMAT3,U_SYMAT4,U_SYMAT5};
void calc_bssnrhs(double * dev_var_out, double * dev_var_in, const unsigned int unzip_dof, const double * pmin, const double * pmax, const unsigned int * sz, const unsigned int& bflag, hipStream_t stream,
#include "para_derivs.h"
,
#include "para_staged.h"
)
{
CHECK_ERROR(hipMemsetAsync(dev_var_out, 0, 24*unzip_dof*sizeof(double), stream), "output array cleaning call"); // Clean output array
int alphaInt = (VAR_CU::U_ALPHA) * unzip_dof;
int chiInt = (VAR_CU::U_CHI) * unzip_dof;
int KInt = (VAR_CU::U_K) * unzip_dof;
int gt0Int = (VAR_CU::U_SYMGT0) * unzip_dof;
int gt1Int = (VAR_CU::U_SYMGT1) * unzip_dof;
int gt2Int = (VAR_CU::U_SYMGT2) * unzip_dof;
int gt3Int = (VAR_CU::U_SYMGT3) * unzip_dof;
int gt4Int = (VAR_CU::U_SYMGT4) * unzip_dof;
int gt5Int = (VAR_CU::U_SYMGT5) * unzip_dof;
int beta0Int = (VAR_CU::U_BETA0) * unzip_dof;
int beta1Int = (VAR_CU::U_BETA1) * unzip_dof;
int beta2Int = (VAR_CU::U_BETA2) * unzip_dof;
int At0Int = (VAR_CU::U_SYMAT0) * unzip_dof;
int At1Int = (VAR_CU::U_SYMAT1) * unzip_dof;
int At2Int = (VAR_CU::U_SYMAT2) * unzip_dof;
int At3Int = (VAR_CU::U_SYMAT3) * unzip_dof;
int At4Int = (VAR_CU::U_SYMAT4) * unzip_dof;
int At5Int = (VAR_CU::U_SYMAT5) * unzip_dof;
int Gt0Int = (VAR_CU::U_GT0) * unzip_dof;
int Gt1Int = (VAR_CU::U_GT1) * unzip_dof;
int Gt2Int = (VAR_CU::U_GT2) * unzip_dof;
int B0Int = (VAR_CU::U_B0) * unzip_dof;
int B1Int = (VAR_CU::U_B1) * unzip_dof;
int B2Int = (VAR_CU::U_B2) * unzip_dof;
double hx = (pmax[0] - pmin[0]) / (sz[0] - 1);
double hy = (pmax[1] - pmin[1]) / (sz[1] - 1);
double hz = (pmax[2] - pmin[2]) / (sz[2] - 1);
calc_deriv_kernel_wrapper(dev_var_out, dev_var_in, hx, hy, hz, sz, bflag, stream,
#include "args_derivs_offsets.h"
);
calc_bssn_eqns_kernel_wrapper(dev_var_in, dev_var_out, sz, pmin, hz, hy, hx, stream,
#include "args_derivs_offsets.h"
,
#include "args_staged.h"
);
if (bflag!=0) {
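// Radiative (Sommerfeld-type) boundary conditions applied per evolved field; the two scalar
// arguments after pmax appear to be the falloff rate and the asymptotic value the field
// relaxes to (1.0 for alpha, chi and the diagonal conformal metric components, 0.0 otherwise).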
bssn_bcs(dev_var_out, dev_var_in, alphaInt, grad_0_alpha, grad_1_alpha, grad_2_alpha, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, chiInt, grad_0_chi, grad_1_chi, grad_2_chi, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, KInt, grad_0_K, grad_1_K, grad_2_K, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta0Int, grad_0_beta0, grad_1_beta0, grad_2_beta0, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta1Int, grad_0_beta1, grad_1_beta1, grad_2_beta1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta2Int, grad_0_beta2, grad_1_beta2, grad_2_beta2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt0Int, grad_0_Gt0, grad_1_Gt0, grad_2_Gt0, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt1Int, grad_0_Gt1, grad_1_Gt1, grad_2_Gt1, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt2Int, grad_0_Gt2, grad_1_Gt2, grad_2_Gt2, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B0Int, grad_0_B0, grad_1_B0, grad_2_B0, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B1Int, grad_0_B1, grad_1_B1, grad_2_B1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B2Int, grad_0_B2, grad_1_B2, grad_2_B2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At0Int, grad_0_At0, grad_1_At0, grad_2_At0, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At1Int, grad_0_At1, grad_1_At1, grad_2_At1, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At2Int, grad_0_At2, grad_1_At2, grad_2_At2, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At3Int, grad_0_At3, grad_1_At3, grad_2_At3, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At4Int, grad_0_At4, grad_1_At4, grad_2_At4, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At5Int, grad_0_At5, grad_1_At5, grad_2_At5, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt0Int, grad_0_gt0, grad_1_gt0, grad_2_gt0, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt1Int, grad_0_gt1, grad_1_gt1, grad_2_gt1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt2Int, grad_0_gt2, grad_1_gt2, grad_2_gt2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt3Int, grad_0_gt3, grad_1_gt3, grad_2_gt3, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt4Int, grad_0_gt4, grad_1_gt4, grad_2_gt4, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt5Int, grad_0_gt5, grad_1_gt5, grad_2_gt5, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
}
calc_ko_deriv_kernel_wrapper(dev_var_out, dev_var_in, hx, hy, hz, sz, bflag, stream,
#include "args_derivs_offsets.h"
);
// get_output_kernel_wrapper(dev_var_out, sz, stream,
// #include "args_derivs_offsets.h"
// );
return;
}
|
8b48e781165e69502591df76bf6c8cb69b9d938a.cu
|
/**
* Created on: Sep 21, 2018
* Author: Akila, Eranga, Eminda, Ruwan
**/
#include "cudaRHS.cuh"
enum VAR_CU {U_ALPHA=0,U_CHI,U_K,U_GT0,U_GT1,U_GT2,U_BETA0,U_BETA1,U_BETA2,U_B0,U_B1,U_B2,U_SYMGT0,U_SYMGT1,U_SYMGT2,U_SYMGT3,U_SYMGT4,U_SYMGT5,U_SYMAT0,U_SYMAT1,U_SYMAT2,U_SYMAT3,U_SYMAT4,U_SYMAT5};
void calc_bssnrhs(double * dev_var_out, double * dev_var_in, const unsigned int unzip_dof, const double * pmin, const double * pmax, const unsigned int * sz, const unsigned int& bflag, cudaStream_t stream,
#include "para_derivs.h"
,
#include "para_staged.h"
)
{
CHECK_ERROR(cudaMemsetAsync(dev_var_out, 0, 24*unzip_dof*sizeof(double), stream), "output array cleaning call"); // Clean output array
int alphaInt = (VAR_CU::U_ALPHA) * unzip_dof;
int chiInt = (VAR_CU::U_CHI) * unzip_dof;
int KInt = (VAR_CU::U_K) * unzip_dof;
int gt0Int = (VAR_CU::U_SYMGT0) * unzip_dof;
int gt1Int = (VAR_CU::U_SYMGT1) * unzip_dof;
int gt2Int = (VAR_CU::U_SYMGT2) * unzip_dof;
int gt3Int = (VAR_CU::U_SYMGT3) * unzip_dof;
int gt4Int = (VAR_CU::U_SYMGT4) * unzip_dof;
int gt5Int = (VAR_CU::U_SYMGT5) * unzip_dof;
int beta0Int = (VAR_CU::U_BETA0) * unzip_dof;
int beta1Int = (VAR_CU::U_BETA1) * unzip_dof;
int beta2Int = (VAR_CU::U_BETA2) * unzip_dof;
int At0Int = (VAR_CU::U_SYMAT0) * unzip_dof;
int At1Int = (VAR_CU::U_SYMAT1) * unzip_dof;
int At2Int = (VAR_CU::U_SYMAT2) * unzip_dof;
int At3Int = (VAR_CU::U_SYMAT3) * unzip_dof;
int At4Int = (VAR_CU::U_SYMAT4) * unzip_dof;
int At5Int = (VAR_CU::U_SYMAT5) * unzip_dof;
int Gt0Int = (VAR_CU::U_GT0) * unzip_dof;
int Gt1Int = (VAR_CU::U_GT1) * unzip_dof;
int Gt2Int = (VAR_CU::U_GT2) * unzip_dof;
int B0Int = (VAR_CU::U_B0) * unzip_dof;
int B1Int = (VAR_CU::U_B1) * unzip_dof;
int B2Int = (VAR_CU::U_B2) * unzip_dof;
double hx = (pmax[0] - pmin[0]) / (sz[0] - 1);
double hy = (pmax[1] - pmin[1]) / (sz[1] - 1);
double hz = (pmax[2] - pmin[2]) / (sz[2] - 1);
calc_deriv_kernel_wrapper(dev_var_out, dev_var_in, hx, hy, hz, sz, bflag, stream,
#include "args_derivs_offsets.h"
);
calc_bssn_eqns_kernel_wrapper(dev_var_in, dev_var_out, sz, pmin, hz, hy, hx, stream,
#include "args_derivs_offsets.h"
,
#include "args_staged.h"
);
if (bflag!=0) {
bssn_bcs(dev_var_out, dev_var_in, alphaInt, grad_0_alpha, grad_1_alpha, grad_2_alpha, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, chiInt, grad_0_chi, grad_1_chi, grad_2_chi, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, KInt, grad_0_K, grad_1_K, grad_2_K, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta0Int, grad_0_beta0, grad_1_beta0, grad_2_beta0, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta1Int, grad_0_beta1, grad_1_beta1, grad_2_beta1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, beta2Int, grad_0_beta2, grad_1_beta2, grad_2_beta2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt0Int, grad_0_Gt0, grad_1_Gt0, grad_2_Gt0, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt1Int, grad_0_Gt1, grad_1_Gt1, grad_2_Gt1, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, Gt2Int, grad_0_Gt2, grad_1_Gt2, grad_2_Gt2, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B0Int, grad_0_B0, grad_1_B0, grad_2_B0, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B1Int, grad_0_B1, grad_1_B1, grad_2_B1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, B2Int, grad_0_B2, grad_1_B2, grad_2_B2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At0Int, grad_0_At0, grad_1_At0, grad_2_At0, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At1Int, grad_0_At1, grad_1_At1, grad_2_At1, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At2Int, grad_0_At2, grad_1_At2, grad_2_At2, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At3Int, grad_0_At3, grad_1_At3, grad_2_At3, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At4Int, grad_0_At4, grad_1_At4, grad_2_At4, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, At5Int, grad_0_At5, grad_1_At5, grad_2_At5, pmin, pmax, 2.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt0Int, grad_0_gt0, grad_1_gt0, grad_2_gt0, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt1Int, grad_0_gt1, grad_1_gt1, grad_2_gt1, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt2Int, grad_0_gt2, grad_1_gt2, grad_2_gt2, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt3Int, grad_0_gt3, grad_1_gt3, grad_2_gt3, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt4Int, grad_0_gt4, grad_1_gt4, grad_2_gt4, pmin, pmax, 1.0, 0.0, sz, bflag, stream);
bssn_bcs(dev_var_out, dev_var_in, gt5Int, grad_0_gt5, grad_1_gt5, grad_2_gt5, pmin, pmax, 1.0, 1.0, sz, bflag, stream);
}
calc_ko_deriv_kernel_wrapper(dev_var_out, dev_var_in, hx, hy, hz, sz, bflag, stream,
#include "args_derivs_offsets.h"
);
// get_output_kernel_wrapper(dev_var_out, sz, stream,
// #include "args_derivs_offsets.h"
// );
return;
}
|
5c19e4c8b66760ecbf5e108c4e452c34de3205fb.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "types.h"
#include "dacrt.h"
#include "rtv.h"
#include "rply.h"
#include "timer.h"
#include "my_malloc.h"
#include <cfloat>
#include <vector>
#include <helper_math.h>
#include <cuda_gl_interop.h>
#include <iostream>
#include <fstream>
#define GENERATE_BLOCKSIZE 16
#define SHADE_BLOCKSIZE 256
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static TriSet s_tris;
static RaySet s_rays;
static float* d_rox = 0;
static float* d_roy = 0;
static float* d_roz = 0;
static float* d_rdx = 0;
static float* d_rdy = 0;
static float* d_rdz = 0;
static int* d_id = 0;
static float* d_tmax = 0;
static int* d_hit = 0;
struct cudaGraphicsResource* s_pixelsResource = 0;
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
struct Canvas
{
int w;
int h;
};
struct Camera
{
float3 position;
float3 lowerLeftDir;
float3 du;
float3 dv;
int nu;
int nv;
};
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static Canvas g_canvas;
static Camera g_camera;
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void kernelGeneratePrimary(float3 camPos, float3 lowerLeftDir, float3 du, float3 dv, int numRaysX, int numRaysY,
float* rox, float* roy, float* roz, float* rdx, float* rdy, float* rdz, int* id, float* tmax, int* hit)
{
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= numRaysX || ty >= numRaysY)
{
return;
}
const int idx = ty * numRaysX + tx;
rox[idx] = camPos.x;
roy[idx] = camPos.y;
roz[idx] = camPos.z;
float3 dir = lowerLeftDir + du * tx + dv * ty + du * numRaysX * ty;
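// Note: dv (set up in cameraCB) subtracts one full u-axis width per row, and the
// du * numRaysX * ty term adds it back, so, assuming numRaysX matches the canvas width,
// this reduces to the usual lowerLeftDir + tx * du + ty * (vertical step).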
rdx[idx] = dir.x;
rdy[idx] = dir.y;
rdz[idx] = dir.z;
tmax[idx] = FLT_MAX;
id[idx] = idx;
hit[idx] = -1;
}
void generatePrimary()
{
dim3 numBlocks;
numBlocks.x = ceilf(((float)g_camera.nu/(float)GENERATE_BLOCKSIZE));
numBlocks.y = ceilf(((float)g_camera.nv/(float)GENERATE_BLOCKSIZE));
dim3 blockSize;
blockSize.x = GENERATE_BLOCKSIZE;
blockSize.y = GENERATE_BLOCKSIZE;
hipLaunchKernelGGL(( kernelGeneratePrimary), dim3(numBlocks), dim3(blockSize), 0, 0, g_camera.position, g_camera.lowerLeftDir, g_camera.du, g_camera.dv, g_camera.nu, g_camera.nv,
d_rox, d_roy, d_roz, d_rdx, d_rdy, d_rdz, d_id, d_tmax, d_hit);
float* h_rdx = new float[s_rays.count];
float* h_rdy = new float[s_rays.count];
float* h_rdz = new float[s_rays.count];
hipMemcpy(h_rdx, d_rdx, s_rays.count*sizeof(float), hipMemcpyDefault);
hipMemcpy(h_rdy, d_rdy, s_rays.count*sizeof(float), hipMemcpyDefault);
hipMemcpy(h_rdz, d_rdz, s_rays.count*sizeof(float), hipMemcpyDefault);
std::ofstream f("out.txt");
for(int i = 0; i < s_rays.count; ++i)
{
f << h_rdx[i] << ", " << h_rdy[i] << ", " << h_rdz[i] << std::endl;
}
f.close();
exit(1);
}
__global__ void kernelShadePixels(unsigned char* pixels, int numRays, float* rox, float* roy, float* roz, float* rdx, float* rdy, float* rdz)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= numRays)
{
return;
}
float3 dir = (make_float3(rdx[idx], rdy[idx], rdz[idx]));
pixels[idx*3+0] = dir.x * 255;
pixels[idx*3+1] = dir.y * 255;
pixels[idx*3+2] = 0;
}
void shadePixels(unsigned int pixelBufferID)
{
if(!s_pixelsResource)
{
hipGraphicsGLRegisterBuffer(&s_pixelsResource, pixelBufferID, hipGraphicsMapFlagsWriteDiscard);
}
unsigned char* pixels = 0;
hipGraphicsMapResources(1, &s_pixelsResource);
size_t num_bytes = 0;
hipGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, s_pixelsResource);
const int numBlocks = ceilf(((float)(s_rays.count)/(float)SHADE_BLOCKSIZE));
hipLaunchKernelGGL(( kernelShadePixels), dim3(numBlocks), dim3(SHADE_BLOCKSIZE), 0, 0, pixels, g_camera.nu*g_camera.nv, d_rox, d_roy, d_roz, d_rdx, d_rdy, d_rdz);
hipGraphicsUnmapResources(1, &s_pixelsResource);
}
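// A note on the CUDA-GL interop flow above (based on how these calls are
// normally used): the pixel buffer object is registered once and cached in
// s_pixelsResource; each frame it is mapped, its device pointer is fetched,
// the shading kernel writes RGB bytes straight into it, and it is unmapped
// again so OpenGL can display the result without a host round trip.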
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
int round4(int a)
{
return (a + 3) & ~3;
}
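// Quick sketch of the padding convention assumed here: (a + 3) & ~3 rounds a
// up to the next multiple of 4, e.g. round4(1) == 4, round4(4) == 4,
// round4(7) == 8. Ray and triangle counts below are padded to this size so
// the SoA arrays can be processed in aligned groups of four.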
void reshapeCB(int w, int h)
{
g_canvas.w = w;
g_canvas.h = h;
s_rays.count = round4(g_canvas.w * g_canvas.h);
raySetInitialize(s_rays);
hipFree(d_rox);
hipFree(d_roy);
hipFree(d_roz);
hipFree(d_rdx);
hipFree(d_rdy);
hipFree(d_rdz);
hipFree(d_tmax);
hipFree(d_id);
hipFree(d_hit);
hipMalloc(&d_rox, s_rays.count*sizeof(float));
hipMalloc(&d_roy, s_rays.count*sizeof(float));
hipMalloc(&d_roz, s_rays.count*sizeof(float));
hipMalloc(&d_rdx, s_rays.count*sizeof(float));
hipMalloc(&d_rdy, s_rays.count*sizeof(float));
hipMalloc(&d_rdz, s_rays.count*sizeof(float));
hipMalloc(&d_tmax, s_rays.count*sizeof(float));
hipMalloc(&d_id, s_rays.count*sizeof(int));
hipMalloc(&d_hit, s_rays.count*sizeof(int));
}
void cameraCB(float* peye, float* pcenter, float* pup)
{
const float3 eye = make_float3(peye[0], peye[1], peye[2]);
const float3 center = make_float3(pcenter[0], pcenter[1], pcenter[2]);
const float3 up = make_float3(pup[0], pup[1], pup[2]);
// store position
g_camera.position = eye;
// pre-computations
float invHeight = 1.0f / g_canvas.h;
float invWidth = 1.0f / g_canvas.w;
// compute camera basis
float3 axisW = normalize(eye - center);
float3 axisV = normalize(up);
float3 axisU = cross(axisV, axisW);
// compute half scale factors for each basis vector
float sw = g_canvas.w * 0.01f; // scale down so direction components stay near zero for better floating-point precision
float sv = sw * std::tan(0.523598775f); // tan of 30 degrees (half of a 60-degree field of view), in radians
float su = sv * g_canvas.w * invHeight;
// scale each vector
axisW *= sw;
axisV *= sv;
axisU *= su;
// store final direction
g_camera.lowerLeftDir = - axisU - axisV - axisW;
// compute full scales
axisV *= 2.0f;
axisU *= 2.0f;
// interpolation deltas
g_camera.dv = axisV * invHeight - axisU; // also goes back to start of u-axis
g_camera.du = axisU * invWidth;
// number of pixels in U and V directions
g_camera.nu = g_canvas.w;
g_camera.nv = g_canvas.h;
}
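// Worked algebra for the ray direction used in kernelGeneratePrimary (one way
// to read the setup above): with U and V the full image-plane extents,
// du = U/nu and dv = V/nv - U, so
//   lowerLeftDir + du*tx + dv*ty + du*nu*ty
//     = lowerLeftDir + U*(tx/nu) + V*(ty/nv) - U*ty + U*ty
//     = lowerLeftDir + U*(tx/nu) + V*(ty/nv),
// i.e. the extra du*numRaysX*ty term in the kernel cancels the -axisU folded
// into dv.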
void renderCB(unsigned int pixelBufferID)
{
generatePrimary();
trace(s_tris.bt, s_tris.bc, 0, s_tris.count, s_rays.count);
shadePixels(pixelBufferID);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static std::vector<float3> vertices;
static std::vector<int> elements;
float& at(float3& v, int i)
{
switch(i)
{
case 0: return v.x;
case 1: return v.y;
default: return v.z;
}
}
int vertex_cb(p_ply_argument argument)
{
long id;
ply_get_argument_user_data(argument, NULL, &id);
if(id == 0) vertices.resize(vertices.size()+1);
at(vertices.back(), id) = ply_get_argument_value(argument) * 50;
return 1;
}
int face_cb(p_ply_argument argument)
{
long length, value_index;
ply_get_argument_property(argument, NULL, &length, &value_index);
if(value_index >= 0 && value_index <= 2) elements.push_back(ply_get_argument_value(argument));
return 1;
}
void loadSceneBunny()
{
p_ply ply = ply_open("/home/potato/Downloads/bunny.ply", NULL, 0, NULL);
if (!ply) exit(1);
if (!ply_read_header(ply)) exit(1);
long nvertices = ply_set_read_cb(ply, "vertex", "x", vertex_cb, NULL, 0);
ply_set_read_cb(ply, "vertex", "y", vertex_cb, NULL, 1);
ply_set_read_cb(ply, "vertex", "z", vertex_cb, NULL, 2);
long ntriangles = ply_set_read_cb(ply, "face", "vertex_indices", face_cb, NULL, 0);
if (!ply_read(ply)) exit(1);
ply_close(ply);
int count = elements.size()/3;
s_tris.count = round4(count);
s_tris.v0x = amalloc<float>(s_tris.count);
s_tris.v0y = amalloc<float>(s_tris.count);
s_tris.v0z = amalloc<float>(s_tris.count);
s_tris.v1x = amalloc<float>(s_tris.count);
s_tris.v1y = amalloc<float>(s_tris.count);
s_tris.v1z = amalloc<float>(s_tris.count);
s_tris.v2x = amalloc<float>(s_tris.count);
s_tris.v2y = amalloc<float>(s_tris.count);
s_tris.v2z = amalloc<float>(s_tris.count);
s_tris.n0x = amalloc<float>(s_tris.count);
s_tris.n0y = amalloc<float>(s_tris.count);
s_tris.n0z = amalloc<float>(s_tris.count);
s_tris.n1x = amalloc<float>(s_tris.count);
s_tris.n1y = amalloc<float>(s_tris.count);
s_tris.n1z = amalloc<float>(s_tris.count);
s_tris.n2x = amalloc<float>(s_tris.count);
s_tris.n2y = amalloc<float>(s_tris.count);
s_tris.n2z = amalloc<float>(s_tris.count);
std::vector<float3> normals(vertices.size());
for(unsigned int e = 0; e < elements.size(); e+=3)
{
const int e0 = elements[e+0];
const int e1 = elements[e+1];
const int e2 = elements[e+2];
float3 v0 = vertices[e0];
float3 v1 = vertices[e1];
float3 v2 = vertices[e2];
float3 n = cross(v1-v0, v2-v0);
normals[e0] += n;
normals[e1] += n;
normals[e2] += n;
}
int dst = 0;
for(unsigned int e = 0; e < elements.size(); e+=3)
{
const int e0 = elements[e+0];
const int e1 = elements[e+1];
const int e2 = elements[e+2];
float3 v0 = vertices[e0];
float3 v1 = vertices[e1];
float3 v2 = vertices[e2];
s_tris.v0x[dst] = v0.x;
s_tris.v0y[dst] = v0.y;
s_tris.v0z[dst] = v0.z;
s_tris.v1x[dst] = v1.x;
s_tris.v1y[dst] = v1.y;
s_tris.v1z[dst] = v1.z;
s_tris.v2x[dst] = v2.x;
s_tris.v2y[dst] = v2.y;
s_tris.v2z[dst] = v2.z;
float3 n0 = normalize(normals[e0]);
float3 n1 = normalize(normals[e1]);
float3 n2 = normalize(normals[e2]);
s_tris.n0x[dst] = n0.x;
s_tris.n0y[dst] = n0.y;
s_tris.n0z[dst] = n0.z;
s_tris.n1x[dst] = n1.x;
s_tris.n1y[dst] = n1.y;
s_tris.n1z[dst] = n1.z;
s_tris.n2x[dst] = n2.x;
s_tris.n2y[dst] = n2.y;
s_tris.n2z[dst] = n2.z;
++dst;
}
// pad the SoA arrays up to the rounded count by repeating the last real triangle
std::fill_n(s_tris.v0x + count, s_tris.count - count, s_tris.v0x[count-1]);
std::fill_n(s_tris.v0y + count, s_tris.count - count, s_tris.v0y[count-1]);
std::fill_n(s_tris.v0z + count, s_tris.count - count, s_tris.v0z[count-1]);
std::fill_n(s_tris.v1x + count, s_tris.count - count, s_tris.v1x[count-1]);
std::fill_n(s_tris.v1y + count, s_tris.count - count, s_tris.v1y[count-1]);
std::fill_n(s_tris.v1z + count, s_tris.count - count, s_tris.v1z[count-1]);
std::fill_n(s_tris.v2x + count, s_tris.count - count, s_tris.v2x[count-1]);
std::fill_n(s_tris.v2y + count, s_tris.count - count, s_tris.v2y[count-1]);
std::fill_n(s_tris.v2z + count, s_tris.count - count, s_tris.v2z[count-1]);
}
void endLoadScene()
{
triSetInitialize(s_tris);
float3 btmin = make_float3(s_tris.bt.minx[0], s_tris.bt.miny[0], s_tris.bt.minz[0]);
float3 btmax = make_float3(s_tris.bt.maxx[0], s_tris.bt.maxy[0], s_tris.bt.maxz[0]);
float3 center = (btmin + btmax) * 0.5f;
float3 eye = center + make_float3(0,0,10);
float3 up = make_float3(0,1,0);
rtvSetCamera(&eye.x, ¢er.x, &up.x);
}
int main()
{
rtvInit(1024, 1024);
rtvSetReshapeCallback(reshapeCB);
rtvSetCameraCallback(cameraCB);
rtvSetBufferRenderCallback(renderCB);
loadSceneBunny();
endLoadScene();
rtvExec();
return 0;
}
|
5c19e4c8b66760ecbf5e108c4e452c34de3205fb.cu
|
#include "types.h"
#include "dacrt.h"
#include "rtv.h"
#include "rply.h"
#include "timer.h"
#include "my_malloc.h"
#include <cfloat>
#include <vector>
#include <helper_math.h>
#include <cuda_gl_interop.h>
#include <iostream>
#include <fstream>
#define GENERATE_BLOCKSIZE 16
#define SHADE_BLOCKSIZE 256
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static TriSet s_tris;
static RaySet s_rays;
static float* d_rox = 0;
static float* d_roy = 0;
static float* d_roz = 0;
static float* d_rdx = 0;
static float* d_rdy = 0;
static float* d_rdz = 0;
static int* d_id = 0;
static float* d_tmax = 0;
static int* d_hit = 0;
struct cudaGraphicsResource* s_pixelsResource = 0;
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
struct Canvas
{
int w;
int h;
};
struct Camera
{
float3 position;
float3 lowerLeftDir;
float3 du;
float3 dv;
int nu;
int nv;
};
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static Canvas g_canvas;
static Camera g_camera;
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
__global__ void kernelGeneratePrimary(float3 camPos, float3 lowerLeftDir, float3 du, float3 dv, int numRaysX, int numRaysY,
float* rox, float* roy, float* roz, float* rdx, float* rdy, float* rdz, int* id, float* tmax, int* hit)
{
const int tx = blockIdx.x * blockDim.x + threadIdx.x;
const int ty = blockIdx.y * blockDim.y + threadIdx.y;
if(tx >= numRaysX || ty >= numRaysY)
{
return;
}
const int idx = ty * numRaysX + tx;
rox[idx] = camPos.x;
roy[idx] = camPos.y;
roz[idx] = camPos.z;
float3 dir = lowerLeftDir + du * tx + dv * ty + du * numRaysX * ty;
rdx[idx] = dir.x;
rdy[idx] = dir.y;
rdz[idx] = dir.z;
tmax[idx] = FLT_MAX;
id[idx] = idx;
hit[idx] = -1;
}
void generatePrimary()
{
dim3 numBlocks;
numBlocks.x = ceilf(((float)g_camera.nu/(float)GENERATE_BLOCKSIZE));
numBlocks.y = ceilf(((float)g_camera.nv/(float)GENERATE_BLOCKSIZE));
dim3 blockSize;
blockSize.x = GENERATE_BLOCKSIZE;
blockSize.y = GENERATE_BLOCKSIZE;
kernelGeneratePrimary<<<numBlocks, blockSize>>>(g_camera.position, g_camera.lowerLeftDir, g_camera.du, g_camera.dv, g_camera.nu, g_camera.nv,
d_rox, d_roy, d_roz, d_rdx, d_rdy, d_rdz, d_id, d_tmax, d_hit);
float* h_rdx = new float[s_rays.count];
float* h_rdy = new float[s_rays.count];
float* h_rdz = new float[s_rays.count];
cudaMemcpy(h_rdx, d_rdx, s_rays.count*sizeof(float), cudaMemcpyDefault);
cudaMemcpy(h_rdy, d_rdy, s_rays.count*sizeof(float), cudaMemcpyDefault);
cudaMemcpy(h_rdz, d_rdz, s_rays.count*sizeof(float), cudaMemcpyDefault);
std::ofstream f("out.txt");
for(int i = 0; i < s_rays.count; ++i)
{
f << h_rdx[i] << ", " << h_rdy[i] << ", " << h_rdz[i] << std::endl;
}
f.close();
exit(1);
}
__global__ void kernelShadePixels(unsigned char* pixels, int numRays, float* rox, float* roy, float* roz, float* rdx, float* rdy, float* rdz)
{
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx >= numRays)
{
return;
}
float3 dir = (make_float3(rdx[idx], rdy[idx], rdz[idx]));
pixels[idx*3+0] = dir.x * 255;
pixels[idx*3+1] = dir.y * 255;
pixels[idx*3+2] = 0;
}
void shadePixels(unsigned int pixelBufferID)
{
if(!s_pixelsResource)
{
cudaGraphicsGLRegisterBuffer(&s_pixelsResource, pixelBufferID, cudaGraphicsMapFlagsWriteDiscard);
}
unsigned char* pixels = 0;
cudaGraphicsMapResources(1, &s_pixelsResource);
size_t num_bytes = 0;
cudaGraphicsResourceGetMappedPointer((void**)&pixels, &num_bytes, s_pixelsResource);
const int numBlocks = ceilf(((float)(s_rays.count)/(float)SHADE_BLOCKSIZE));
kernelShadePixels<<<numBlocks, SHADE_BLOCKSIZE, 0>>>(pixels, g_camera.nu*g_camera.nv, d_rox, d_roy, d_roz, d_rdx, d_rdy, d_rdz);
cudaGraphicsUnmapResources(1, &s_pixelsResource);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
int round4(int a)
{
return (a + 3) & ~3;
}
void reshapeCB(int w, int h)
{
g_canvas.w = w;
g_canvas.h = h;
s_rays.count = round4(g_canvas.w * g_canvas.h);
raySetInitialize(s_rays);
cudaFree(d_rox);
cudaFree(d_roy);
cudaFree(d_roz);
cudaFree(d_rdx);
cudaFree(d_rdy);
cudaFree(d_rdz);
cudaFree(d_tmax);
cudaFree(d_id);
cudaFree(d_hit);
cudaMalloc(&d_rox, s_rays.count*sizeof(float));
cudaMalloc(&d_roy, s_rays.count*sizeof(float));
cudaMalloc(&d_roz, s_rays.count*sizeof(float));
cudaMalloc(&d_rdx, s_rays.count*sizeof(float));
cudaMalloc(&d_rdy, s_rays.count*sizeof(float));
cudaMalloc(&d_rdz, s_rays.count*sizeof(float));
cudaMalloc(&d_tmax, s_rays.count*sizeof(float));
cudaMalloc(&d_id, s_rays.count*sizeof(int));
cudaMalloc(&d_hit, s_rays.count*sizeof(int));
}
void cameraCB(float* peye, float* pcenter, float* pup)
{
const float3 eye = make_float3(peye[0], peye[1], peye[2]);
const float3 center = make_float3(pcenter[0], pcenter[1], pcenter[2]);
const float3 up = make_float3(pup[0], pup[1], pup[2]);
// store position
g_camera.position = eye;
// pre-computations
float invHeight = 1.0f / g_canvas.h;
float invWidth = 1.0f / g_canvas.w;
// compute camera basis
float3 axisW = normalize(eye - center);
float3 axisV = normalize(up);
float3 axisU = cross(axisV, axisW);
// compute half scale factors for each basis vector
float sw = g_canvas.w * 0.01f; // scale down so direction components stay near zero for better floating-point precision
float sv = sw * std::tan(0.523598775f); // tan of 30 degrees (half of a 60-degree field of view), in radians
float su = sv * g_canvas.w * invHeight;
// scale each vector
axisW *= sw;
axisV *= sv;
axisU *= su;
// store final direction
g_camera.lowerLeftDir = - axisU - axisV - axisW;
// compute full scales
axisV *= 2.0f;
axisU *= 2.0f;
// interpolation deltas
g_camera.dv = axisV * invHeight - axisU; // also goes back to start of u-axis
g_camera.du = axisU * invWidth;
// number of pixels in U and V directions
g_camera.nu = g_canvas.w;
g_camera.nv = g_canvas.h;
}
void renderCB(unsigned int pixelBufferID)
{
generatePrimary();
trace(s_tris.bt, s_tris.bc, 0, s_tris.count, s_rays.count);
shadePixels(pixelBufferID);
}
// ----------------------------------------------------------------------------------------------------------------------------------------
// ----------------------------------------------------------------------------------------------------------------------------------------
static std::vector<float3> vertices;
static std::vector<int> elements;
float& at(float3& v, int i)
{
switch(i)
{
case 0: return v.x;
case 1: return v.y;
default: return v.z;
}
}
int vertex_cb(p_ply_argument argument)
{
long id;
ply_get_argument_user_data(argument, NULL, &id);
if(id == 0) vertices.resize(vertices.size()+1);
at(vertices.back(), id) = ply_get_argument_value(argument) * 50;
return 1;
}
int face_cb(p_ply_argument argument)
{
long length, value_index;
ply_get_argument_property(argument, NULL, &length, &value_index);
if(value_index >= 0 && value_index <= 2) elements.push_back(ply_get_argument_value(argument));
return 1;
}
void loadSceneBunny()
{
p_ply ply = ply_open("/home/potato/Downloads/bunny.ply", NULL, 0, NULL);
if (!ply) exit(1);
if (!ply_read_header(ply)) exit(1);
long nvertices = ply_set_read_cb(ply, "vertex", "x", vertex_cb, NULL, 0);
ply_set_read_cb(ply, "vertex", "y", vertex_cb, NULL, 1);
ply_set_read_cb(ply, "vertex", "z", vertex_cb, NULL, 2);
long ntriangles = ply_set_read_cb(ply, "face", "vertex_indices", face_cb, NULL, 0);
if (!ply_read(ply)) exit(1);
ply_close(ply);
int count = elements.size()/3;
s_tris.count = round4(count);
s_tris.v0x = amalloc<float>(s_tris.count);
s_tris.v0y = amalloc<float>(s_tris.count);
s_tris.v0z = amalloc<float>(s_tris.count);
s_tris.v1x = amalloc<float>(s_tris.count);
s_tris.v1y = amalloc<float>(s_tris.count);
s_tris.v1z = amalloc<float>(s_tris.count);
s_tris.v2x = amalloc<float>(s_tris.count);
s_tris.v2y = amalloc<float>(s_tris.count);
s_tris.v2z = amalloc<float>(s_tris.count);
s_tris.n0x = amalloc<float>(s_tris.count);
s_tris.n0y = amalloc<float>(s_tris.count);
s_tris.n0z = amalloc<float>(s_tris.count);
s_tris.n1x = amalloc<float>(s_tris.count);
s_tris.n1y = amalloc<float>(s_tris.count);
s_tris.n1z = amalloc<float>(s_tris.count);
s_tris.n2x = amalloc<float>(s_tris.count);
s_tris.n2y = amalloc<float>(s_tris.count);
s_tris.n2z = amalloc<float>(s_tris.count);
std::vector<float3> normals(vertices.size());
for(unsigned int e = 0; e < elements.size(); e+=3)
{
const int e0 = elements[e+0];
const int e1 = elements[e+1];
const int e2 = elements[e+2];
float3 v0 = vertices[e0];
float3 v1 = vertices[e1];
float3 v2 = vertices[e2];
float3 n = cross(v1-v0, v2-v0);
normals[e0] += n;
normals[e1] += n;
normals[e2] += n;
}
int dst = 0;
for(unsigned int e = 0; e < elements.size(); e+=3)
{
const int e0 = elements[e+0];
const int e1 = elements[e+1];
const int e2 = elements[e+2];
float3 v0 = vertices[e0];
float3 v1 = vertices[e1];
float3 v2 = vertices[e2];
s_tris.v0x[dst] = v0.x;
s_tris.v0y[dst] = v0.y;
s_tris.v0z[dst] = v0.z;
s_tris.v1x[dst] = v1.x;
s_tris.v1y[dst] = v1.y;
s_tris.v1z[dst] = v1.z;
s_tris.v2x[dst] = v2.x;
s_tris.v2y[dst] = v2.y;
s_tris.v2z[dst] = v2.z;
float3 n0 = normalize(normals[e0]);
float3 n1 = normalize(normals[e1]);
float3 n2 = normalize(normals[e2]);
s_tris.n0x[dst] = n0.x;
s_tris.n0y[dst] = n0.y;
s_tris.n0z[dst] = n0.z;
s_tris.n1x[dst] = n1.x;
s_tris.n1y[dst] = n1.y;
s_tris.n1z[dst] = n1.z;
s_tris.n2x[dst] = n2.x;
s_tris.n2y[dst] = n2.y;
s_tris.n2z[dst] = n2.z;
++dst;
}
// pad the SoA arrays up to the rounded count by repeating the last real triangle
std::fill_n(s_tris.v0x + count, s_tris.count - count, s_tris.v0x[count-1]);
std::fill_n(s_tris.v0y + count, s_tris.count - count, s_tris.v0y[count-1]);
std::fill_n(s_tris.v0z + count, s_tris.count - count, s_tris.v0z[count-1]);
std::fill_n(s_tris.v1x + count, s_tris.count - count, s_tris.v1x[count-1]);
std::fill_n(s_tris.v1y + count, s_tris.count - count, s_tris.v1y[count-1]);
std::fill_n(s_tris.v1z + count, s_tris.count - count, s_tris.v1z[count-1]);
std::fill_n(s_tris.v2x + count, s_tris.count - count, s_tris.v2x[count-1]);
std::fill_n(s_tris.v2y + count, s_tris.count - count, s_tris.v2y[count-1]);
std::fill_n(s_tris.v2z + count, s_tris.count - count, s_tris.v2z[count-1]);
}
void endLoadScene()
{
triSetInitialize(s_tris);
float3 btmin = make_float3(s_tris.bt.minx[0], s_tris.bt.miny[0], s_tris.bt.minz[0]);
float3 btmax = make_float3(s_tris.bt.maxx[0], s_tris.bt.maxy[0], s_tris.bt.maxz[0]);
float3 center = (btmin + btmax) * 0.5f;
float3 eye = center + make_float3(0,0,10);
float3 up = make_float3(0,1,0);
rtvSetCamera(&eye.x, ¢er.x, &up.x);
}
int main()
{
rtvInit(1024, 1024);
rtvSetReshapeCallback(reshapeCB);
rtvSetCameraCallback(cameraCB);
rtvSetBufferRenderCallback(renderCB);
loadSceneBunny();
endLoadScene();
rtvExec();
return 0;
}
|
bc2dc591eb23e227dc045bd377b4262edbc8a4d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright 2019 by XGBoost Contributors
*
* \file simple_csr_source.cuh
* \brief An extension for the simple CSR source in-memory data structure to accept
* foreign columnar.
*/
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <xgboost/base.h>
#include <xgboost/data.h>
#include <vector>
#include <algorithm>
#include "simple_csr_source.h"
#include "columnar.h"
#include "../common/bitfield.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace data {
template <size_t kBlockThreads>
__global__ void CountValidKernel(common::Span<Columnar const> columns,
int32_t const n_rows,
common::Span<size_t> offsets) {
// One block for a column
auto const bid = blockIdx.x;
auto const tid = threadIdx.x;
if (bid >= columns.size()) {
return;
}
RBitField8 const mask = columns[bid].valid;
for (auto r = tid; r < n_rows; r += kBlockThreads) {
if (mask.Data() == nullptr || mask.Check(r)) {
atomicAdd(reinterpret_cast<BitFieldAtomicType*>(&offsets[r+1]),
static_cast<BitFieldAtomicType>(1));
}
}
}
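// Sketch of the counting scheme as the kernel above appears to implement it:
// each block scans one column, and every non-null entry of row r bumps
// offsets[r+1] atomically. offsets[0] stays zero, so an inclusive scan over
// the whole array afterwards turns these per-row counts into CSR row pointers.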
__global__ void CreateCSRKernel(Columnar const column,
int32_t colid,
common::Span<size_t> offsets,
common::Span<Entry> out_data) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (column.size <= tid) {
return;
}
if (column.valid.Data() == nullptr || column.valid.Check(tid)) {
int32_t oid = offsets[tid];
out_data[oid].fvalue = column.data[tid];
out_data[oid].index = colid;
offsets[tid] += 1;
}
}
void SimpleCSRSource::FromDeviceColumnar(std::vector<Columnar> cols) {
uint64_t const n_cols = cols.size();
uint64_t const n_rows = cols[0].size;
auto ptr = cols[0].data.data();
int32_t device = dh::CudaGetPointerDevice(ptr);
CHECK_NE(device, -1);
for (int32_t i = 1; i < n_cols; ++i) {
auto ptr = cols[i].data.data();
int32_t ptr_device = dh::CudaGetPointerDevice(ptr);
CHECK_EQ(device, ptr_device)
<< "GPU ID at 0^th column: " << device << ", "
<< "GPU ID at column " << i << ": " << ptr_device;
}
dh::safe_cuda(hipSetDevice(device));
page_.offset.SetDevice(device);
page_.offset.Resize(info.num_row_ + 1);
page_.data.SetDevice(device);
page_.data.Resize(info.num_nonzero_);
auto s_data = page_.data.DeviceSpan();
auto s_offsets = page_.offset.DeviceSpan();
CHECK_EQ(s_offsets.size(), n_rows + 1);
int32_t constexpr kThreads = 256;
dh::device_vector<Columnar> d_cols(cols);
auto s_d_cols = dh::ToSpan(d_cols);
dh::safe_cuda(hipMemset(s_offsets.data(), 0, sizeof(size_t) * (n_rows + 1)));
hipLaunchKernelGGL(( CountValidKernel<kThreads>), dim3(n_cols), dim3(kThreads), 0, 0, s_d_cols, n_rows, s_offsets);
thrust::device_ptr<size_t> p_offsets(s_offsets.data());
CHECK_GE(s_offsets.size(), n_rows + 1);
thrust::inclusive_scan(p_offsets, p_offsets + n_rows + 1, p_offsets);
// Created for building csr matrix, where we need to change index
// after processing each column.
dh::device_vector<size_t> tmp_offset(page_.offset.Size());
thrust::copy(p_offsets, p_offsets + n_rows + 1, tmp_offset.begin());
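// tmp_offset starts as a copy of the scanned row pointers and is then used as
// per-row write cursors: CreateCSRKernel bumps offsets[tid] after each value it
// emits, so after all columns are processed tmp_offset has advanced past the
// values written for every row, while page_.offset keeps the untouched CSR
// row pointers.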
int32_t kBlocks = common::DivRoundUp(n_rows, kThreads);
for (size_t col = 0; col < n_cols; ++col) {
hipLaunchKernelGGL(( CreateCSRKernel), dim3(kBlocks), dim3(kThreads), 0, 0, d_cols[col], col, dh::ToSpan(tmp_offset), s_data);
}
}
} // namespace data
} // namespace xgboost
|
bc2dc591eb23e227dc045bd377b4262edbc8a4d6.cu
|
/*!
* Copyright 2019 by XGBoost Contributors
*
* \file simple_csr_source.cuh
* \brief An extension for the simple CSR source in-memory data structure to accept
* foreign columnar.
*/
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <xgboost/base.h>
#include <xgboost/data.h>
#include <vector>
#include <algorithm>
#include "simple_csr_source.h"
#include "columnar.h"
#include "../common/bitfield.h"
#include "../common/device_helpers.cuh"
namespace xgboost {
namespace data {
template <size_t kBlockThreads>
__global__ void CountValidKernel(common::Span<Columnar const> columns,
int32_t const n_rows,
common::Span<size_t> offsets) {
// One block for a column
auto const bid = blockIdx.x;
auto const tid = threadIdx.x;
if (bid >= columns.size()) {
return;
}
RBitField8 const mask = columns[bid].valid;
for (auto r = tid; r < n_rows; r += kBlockThreads) {
if (mask.Data() == nullptr || mask.Check(r)) {
atomicAdd(reinterpret_cast<BitFieldAtomicType*>(&offsets[r+1]),
static_cast<BitFieldAtomicType>(1));
}
}
}
__global__ void CreateCSRKernel(Columnar const column,
int32_t colid,
common::Span<size_t> offsets,
common::Span<Entry> out_data) {
auto tid = threadIdx.x + blockDim.x * blockIdx.x;
if (column.size <= tid) {
return;
}
if (column.valid.Data() == nullptr || column.valid.Check(tid)) {
int32_t oid = offsets[tid];
out_data[oid].fvalue = column.data[tid];
out_data[oid].index = colid;
offsets[tid] += 1;
}
}
void SimpleCSRSource::FromDeviceColumnar(std::vector<Columnar> cols) {
uint64_t const n_cols = cols.size();
uint64_t const n_rows = cols[0].size;
auto ptr = cols[0].data.data();
int32_t device = dh::CudaGetPointerDevice(ptr);
CHECK_NE(device, -1);
for (int32_t i = 1; i < n_cols; ++i) {
auto ptr = cols[i].data.data();
int32_t ptr_device = dh::CudaGetPointerDevice(ptr);
CHECK_EQ(device, ptr_device)
<< "GPU ID at 0^th column: " << device << ", "
<< "GPU ID at column " << i << ": " << ptr_device;
}
dh::safe_cuda(cudaSetDevice(device));
page_.offset.SetDevice(device);
page_.offset.Resize(info.num_row_ + 1);
page_.data.SetDevice(device);
page_.data.Resize(info.num_nonzero_);
auto s_data = page_.data.DeviceSpan();
auto s_offsets = page_.offset.DeviceSpan();
CHECK_EQ(s_offsets.size(), n_rows + 1);
int32_t constexpr kThreads = 256;
dh::device_vector<Columnar> d_cols(cols);
auto s_d_cols = dh::ToSpan(d_cols);
dh::safe_cuda(cudaMemset(s_offsets.data(), 0, sizeof(size_t) * (n_rows + 1)));
CountValidKernel<kThreads><<<n_cols, kThreads>>>(s_d_cols, n_rows, s_offsets);
thrust::device_ptr<size_t> p_offsets(s_offsets.data());
CHECK_GE(s_offsets.size(), n_rows + 1);
thrust::inclusive_scan(p_offsets, p_offsets + n_rows + 1, p_offsets);
// Created for building csr matrix, where we need to change index
// after processing each column.
dh::device_vector<size_t> tmp_offset(page_.offset.Size());
thrust::copy(p_offsets, p_offsets + n_rows + 1, tmp_offset.begin());
int32_t kBlocks = common::DivRoundUp(n_rows, kThreads);
for (size_t col = 0; col < n_cols; ++col) {
CreateCSRKernel<<<kBlocks, kThreads>>>(d_cols[col], col, dh::ToSpan(tmp_offset), s_data);
}
}
} // namespace data
} // namespace xgboost
|
0c226cede568d4a3c7c739a2db7c07c003574a3a.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/iterator/discard_iterator.h>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cuspatial/error.hpp>
#include <cuspatial/trajectory.hpp>
namespace cuspatial {
namespace {
struct dispatch_element {
template <typename Element>
std::enable_if_t<std::is_floating_point<Element>::value, std::unique_ptr<cudf::table>> operator()(
cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
auto policy = rmm::exec_policy(stream);
// Construct output columns
auto type = cudf::data_type{cudf::type_to_id<Element>()};
std::vector<std::unique_ptr<cudf::column>> cols{};
cols.reserve(4);
// allocate bbox_x1 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_y1 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_x2 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_y2 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
auto points = thrust::make_zip_iterator(thrust::make_tuple(
x.begin<Element>(), y.begin<Element>(), x.begin<Element>(), y.begin<Element>()));
auto bboxes = thrust::make_zip_iterator(
thrust::make_tuple(cols.at(0)->mutable_view().begin<Element>(), // bbox_x1
cols.at(1)->mutable_view().begin<Element>(), // bbox_y1
cols.at(2)->mutable_view().begin<Element>(), // bbox_x2
cols.at(3)->mutable_view().begin<Element>()) // bbox_y2
);
thrust::fill(policy->on(stream),
bboxes,
bboxes + num_trajectories,
thrust::make_tuple(std::numeric_limits<Element>::max(),
std::numeric_limits<Element>::max(),
std::numeric_limits<Element>::min(),
std::numeric_limits<Element>::min()));
thrust::reduce_by_key(
policy->on(stream), // execution policy
object_id.begin<int32_t>(), // keys_first
object_id.end<int32_t>(), // keys_last
points, // values_first
thrust::make_discard_iterator(), // keys_output
bboxes, // values_output
thrust::equal_to<int32_t>(), // binary_pred
[] __device__(auto a, auto b) { // binary_op
Element x1, y1, x2, y2, x3, y3, x4, y4;
thrust::tie(x1, y1, x2, y2) = a;
thrust::tie(x3, y3, x4, y4) = b;
return thrust::make_tuple(
min(min(x1, x2), x3), min(min(y1, y2), y3), max(max(x1, x2), x4), max(max(y1, y2), y4));
});
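// The zip-iterator trick above feeds each point in as the 4-tuple (x, y, x, y),
// so a single reduce_by_key over points grouped by object_id yields
// (min x, min y, max x, max y) per trajectory in one pass.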
// check for errors
CHECK_CUDA(stream);
return std::make_unique<cudf::table>(std::move(cols));
}
template <typename Element>
std::enable_if_t<not std::is_floating_point<Element>::value, std::unique_ptr<cudf::table>>
operator()(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
CUSPATIAL_FAIL("X and Y must be floating point types");
}
};
} // namespace
namespace detail {
std::unique_ptr<cudf::table> trajectory_bounding_boxes(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
hipStream_t stream)
{
return cudf::type_dispatcher(
x.type(), dispatch_element{}, num_trajectories, object_id, x, y, mr, stream);
}
} // namespace detail
std::unique_ptr<cudf::table> trajectory_bounding_boxes(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_EXPECTS(object_id.size() == x.size() && x.size() == y.size(), "Data size mismatch");
CUSPATIAL_EXPECTS(x.type().id() == y.type().id(), "Data type mismatch");
CUSPATIAL_EXPECTS(object_id.type().id() == cudf::type_id::INT32, "Invalid object_id type");
CUSPATIAL_EXPECTS(!(x.has_nulls() || y.has_nulls() || object_id.has_nulls()),
"NULL support unimplemented");
if (num_trajectories == 0 || object_id.is_empty() || x.is_empty() || y.is_empty()) {
std::vector<std::unique_ptr<cudf::column>> cols{};
cols.reserve(4);
cols.push_back(cudf::empty_like(x));
cols.push_back(cudf::empty_like(y));
cols.push_back(cudf::empty_like(x));
cols.push_back(cudf::empty_like(y));
return std::make_unique<cudf::table>(std::move(cols));
}
return detail::trajectory_bounding_boxes(num_trajectories, object_id, x, y, mr, 0);
}
} // namespace cuspatial
|
0c226cede568d4a3c7c739a2db7c07c003574a3a.cu
|
/*
* Copyright (c) 2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <thrust/iterator/discard_iterator.h>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/column/column_view.hpp>
#include <cudf/copying.hpp>
#include <cudf/detail/utilities/cuda.cuh>
#include <cudf/table/table.hpp>
#include <cudf/utilities/traits.hpp>
#include <cudf/utilities/type_dispatcher.hpp>
#include <cuspatial/error.hpp>
#include <cuspatial/trajectory.hpp>
namespace cuspatial {
namespace {
struct dispatch_element {
template <typename Element>
std::enable_if_t<std::is_floating_point<Element>::value, std::unique_ptr<cudf::table>> operator()(
cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
auto policy = rmm::exec_policy(stream);
// Construct output columns
auto type = cudf::data_type{cudf::type_to_id<Element>()};
std::vector<std::unique_ptr<cudf::column>> cols{};
cols.reserve(4);
// allocate bbox_x1 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_y1 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_x2 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
// allocate bbox_y2 output column
cols.push_back(
cudf::make_numeric_column(type, num_trajectories, cudf::mask_state::UNALLOCATED, stream, mr));
auto points = thrust::make_zip_iterator(thrust::make_tuple(
x.begin<Element>(), y.begin<Element>(), x.begin<Element>(), y.begin<Element>()));
auto bboxes = thrust::make_zip_iterator(
thrust::make_tuple(cols.at(0)->mutable_view().begin<Element>(), // bbox_x1
cols.at(1)->mutable_view().begin<Element>(), // bbox_y1
cols.at(2)->mutable_view().begin<Element>(), // bbox_x2
cols.at(3)->mutable_view().begin<Element>()) // bbox_y2
);
thrust::fill(policy->on(stream),
bboxes,
bboxes + num_trajectories,
thrust::make_tuple(std::numeric_limits<Element>::max(),
std::numeric_limits<Element>::max(),
std::numeric_limits<Element>::min(),
std::numeric_limits<Element>::min()));
thrust::reduce_by_key(
policy->on(stream), // execution policy
object_id.begin<int32_t>(), // keys_first
object_id.end<int32_t>(), // keys_last
points, // values_first
thrust::make_discard_iterator(), // keys_output
bboxes, // values_output
thrust::equal_to<int32_t>(), // binary_pred
[] __device__(auto a, auto b) { // binary_op
Element x1, y1, x2, y2, x3, y3, x4, y4;
thrust::tie(x1, y1, x2, y2) = a;
thrust::tie(x3, y3, x4, y4) = b;
return thrust::make_tuple(
min(min(x1, x2), x3), min(min(y1, y2), y3), max(max(x1, x2), x4), max(max(y1, y2), y4));
});
// check for errors
CHECK_CUDA(stream);
return std::make_unique<cudf::table>(std::move(cols));
}
template <typename Element>
std::enable_if_t<not std::is_floating_point<Element>::value, std::unique_ptr<cudf::table>>
operator()(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
CUSPATIAL_FAIL("X and Y must be floating point types");
}
};
} // namespace
namespace detail {
std::unique_ptr<cudf::table> trajectory_bounding_boxes(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr,
cudaStream_t stream)
{
return cudf::type_dispatcher(
x.type(), dispatch_element{}, num_trajectories, object_id, x, y, mr, stream);
}
} // namespace detail
std::unique_ptr<cudf::table> trajectory_bounding_boxes(cudf::size_type num_trajectories,
cudf::column_view const& object_id,
cudf::column_view const& x,
cudf::column_view const& y,
rmm::mr::device_memory_resource* mr)
{
CUSPATIAL_EXPECTS(object_id.size() == x.size() && x.size() == y.size(), "Data size mismatch");
CUSPATIAL_EXPECTS(x.type().id() == y.type().id(), "Data type mismatch");
CUSPATIAL_EXPECTS(object_id.type().id() == cudf::type_id::INT32, "Invalid object_id type");
CUSPATIAL_EXPECTS(!(x.has_nulls() || y.has_nulls() || object_id.has_nulls()),
"NULL support unimplemented");
if (num_trajectories == 0 || object_id.is_empty() || x.is_empty() || y.is_empty()) {
std::vector<std::unique_ptr<cudf::column>> cols{};
cols.reserve(4);
cols.push_back(cudf::empty_like(x));
cols.push_back(cudf::empty_like(y));
cols.push_back(cudf::empty_like(x));
cols.push_back(cudf::empty_like(y));
return std::make_unique<cudf::table>(std::move(cols));
}
return detail::trajectory_bounding_boxes(num_trajectories, object_id, x, y, mr, 0);
}
} // namespace cuspatial
|
b9c624c5a53433759b038ca0186dd30abb0f4782.hip
|
// !!! This is a file automatically generated by hipify!!!
const char* QN_MLP_BunchCudaVar_rcsid =
"$Header: /u/drspeech/repos/quicknet2/QN_MLP_BunchCudaVar.cu,v 1.5 2011/05/24 02:03:14 davidj Exp $";
/* Must include the config.h file first */
#include <QN_config.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "QN_types.h"
#include "QN_Logger.h"
#include "QN_CudaUtils.h"
#include "QN_MLP_BunchCudaVar.h"
#include "QN_fltvec.h"
#include "QN_intvec.h"
#include "QN_cuvec.h"
#include <hip/hip_runtime.h>
#include <rocblas.h>
// These appear later but we do not want them in the header file
// __global__ void QN_BunchCudaVar_forward_bunch(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
// __global__ void QN_BunchCudaVar_train_bunch(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
// __device__ void QN_BunchCudaVar_forward_bunch_do(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
float * QN_MLP_BunchCudaVar::Last2_weights()
{
fromdev_vf_vf("weights", 1000 * 2698, dev.weights[n_layers - 1], dev.cache_last2_weights); //cw564 - mbt -- TODO
return dev.cache_last2_weights;
}
float * QN_MLP_BunchCudaVar::Last_y()
{
return dev.cache_raw_last2_y;
}
QN_MLP_BunchCudaVar::QN_MLP_BunchCudaVar(int a_bp_num_layer, //cz277 - nn fea bp
int a_debug,
const char* a_dbgname,
size_t a_n_layers,
const size_t a_layer_units[QN_MLP_MAX_LAYERS],
enum QN_CriterionType a_criteriontype, //cz277 - criteria
enum QN_LayerType *a_layertype, //cz277 - nonlinearity, mul actv
enum QN_OutputLayerType a_outtype,
size_t a_size_bunch,
int device_no, //cz277 - device select
const char *env_var4dev_id) //cz277 - env var
: QN_MLP_BaseFl(a_debug, a_dbgname, "QN_MLP_BunchCudaVar",
a_size_bunch, a_n_layers,
a_layer_units), //cz277 - dnn
bp_num_layer(a_bp_num_layer), //cz277 - nn fea bp
criterion_type(a_criteriontype), //cz277 - criteria
hiddenlayer_types(a_layertype), //cz277 - nonlinearity, mul actv
out_layer_type(a_outtype)
{
size_t i;
// Initialize CUDA if it has not happened already
QN_cuda_init(device_no, env_var4dev_id); //cz277 - device select, env var
// Some stuff so that when things go wrong it is more obvious.
// for (i=0; i<MAX_LAYERS; i++)
// {
// layer_x[i] = NULL;
// layer_y[i] = NULL;
// layer_dedy[i] = NULL;
// layer_dydx[i] = NULL;
// layer_dedx[i] = NULL;
// layer_delta_bias[i] = NULL;
// }
//cz277 - criteria
switch(criterion_type)
{
case QN_CRITERION_QUADRATIC:
case QN_CRITERION_XENTROPY:
break;
default:
clog.error("Failed to create an MLP with an invalid training criterion type.");
}
//cz277 - debug
/*for (int i = 1; i < a_n_layers; ++i)
printf("layer %d, srcaddr = %x, srcval = %d, dstaddr = %x, dstval = %d\n", i, a_layertype, a_layertype[2], hiddenlayer_types, hiddenlayer_types[2]);*/
//cz277 - nonlinearity, mul actv
/*switch(hiddenlayer_type)
{
case QN_LAYER_LINEAR:
case QN_LAYER_SIGMOID:
case QN_LAYER_SOFTMAX:
case QN_LAYER_TANH:
case QN_LAYER_SOFTSIGN:
break;
default:
clog.error("Failed to create an MLP with an invalid hidden layer out type.");
}*/
// Maybe we do not support all output layer types
switch(out_layer_type)
{
case QN_OUTPUT_SIGMOID:
//case QN_OUTPUT_SIGMOID_XENTROPY: //cz277 - nonlinearity
case QN_OUTPUT_TANH:
case QN_OUTPUT_SOFTMAX:
case QN_OUTPUT_LINEAR:
case QN_OUTPUT_SOFTSIGN: //cz277 - nonlinearity
break;
default:
clog.error("Failed to create an MLP with an invalid output layer type.");
}
if (size_bunch == 0)
clog.error("Cannot use a 0 bunch size.");
// Allocate device data structures
size_t in_size = layer_size[0];
size_t out_size = layer_size[n_layers-1];
devnew_vf("in", in_size, &(dev.in));
devnew_vf("out", out_size, &(dev.out));
devnew_vf("targ", out_size, &(dev.targ));
//cz277 - fast softmax
devnew_vf("compcache", out_size, &(dev.compcache));
for (i = 1; i<n_layers; i++)
{
size_t size = layer_size[i];
size_t units = layer_units[i];
devnew_vf("layer_bias", size, &(dev.layer_bias[i]));
devnew_vf("layer_y", size, &(dev.layer_y[i]));
devnew_vf("layer_x", size, &(dev.layer_x[i]));
devnew_vf("layer_dedy", size, &(dev.layer_dedy[i]));
devnew_vf("layer_dydx", size, &(dev.layer_dydx[i]));
devnew_vf("layer_dedx", size, &(dev.layer_dedx[i]));
devnew_vf("layer_delta_bias", units, &(dev.layer_delta_bias[i]));
//cz277 - revisit momentum
devnew_vf("l_bias_delta", size, &(dev.l_bias_delta[i]));
}
// Set up the per-weight-matrix data structures.
for (i = 0; i<n_weightmats; i++)
{
// Note the host weights are allocated by QN_MLP_BaseFl
size_t n_weights = weights_size[i];
// Allocate device data structures
devnew_vf("weights", n_weights, &dev.weights[i]);
//cz277 - momentum
devnew_vf("weights_delta", n_weights, &dev.weights_delta[i]);
}
clog.log(QN_LOG_PER_RUN, "Created net with %lu layers, bunchsize %lu.",
n_layers, size_bunch);
for (i=0; i<n_layers; i++)
{
clog.log(QN_LOG_PER_RUN, "Layer %lu has %lu units.",
i+1, layer_units[i]);
}
dev_weights_stale = QN_TRUE;
host_weights_stale = QN_FALSE;
dev.cache_raw_last2_y = new float[300000000]; //cw564 - mbt -- TODO
dev.cache_weights = new float[300000000]; //cw564 - mbt -- TODO
dev.cache_last2_weights = new float[300000000];
}
QN_MLP_BunchCudaVar::~QN_MLP_BunchCudaVar()
{
size_t i;
QN_cuda_check();
// Wind down the per-weight-matrix data structures.
for (i = 0; i<n_weightmats; i++)
{
// Deallocate device data structures
devfree_vf("weights", dev.weights[i]);
//cz277 - momentum
devfree_vf("weights_delta", dev.weights_delta[i]);
// Note the host weights are deallocated by QN_MLP_BaseFl
}
// Wind down the per-layer data structures.
for (i = 1; i<n_layers; i++)
{
// delete [] layer_y[i];
// delete [] layer_delta_bias[i];
// delete [] layer_dedx[i];
// delete [] layer_dydx[i];
// delete [] layer_dedy[i];
// delete [] layer_x[i];
// Note the host biases are deallocated by QN_MLP_BaseFl
devfree_vf("layer_delta_bias", dev.layer_delta_bias[i]);
devfree_vf("layer_dedx", dev.layer_dedx[i]);
devfree_vf("layer_dydx", dev.layer_dydx[i]);
devfree_vf("layer_dedy", dev.layer_dedy[i]);
devfree_vf("layer_x", dev.layer_x[i]);
devfree_vf("layer_y", dev.layer_y[i]);
devfree_vf("layer_bias", dev.layer_bias[i]);
//cz277 - revisit momentum
devfree_vf("l_bias_delta", dev.l_bias_delta[i]);
}
devfree_vf("targ", dev.targ);
devfree_vf("out", dev.out);
devfree_vf("in", dev.in);
//cz277 - fast softmax
devfree_vf("compcache", dev.compcache);
//cw564 - mbt
delete [] dev.cache_raw_last2_y;
delete [] dev.cache_weights;
delete [] dev.cache_last2_weights;
}
void
QN_MLP_BunchCudaVar::forward_bunch(size_t n_frames, const float* in, float* out, const float * * spkr_wgt, const size_t num_basis)
{
// printf("in=%x, out=%x\n", in, out);
// Copy the data across to the device
int in_size = n_frames * layer_units[0];
int out_size = n_frames * layer_units[n_layers-1];
todev_vf_vf("forward_bunch().in", in_size, in, dev.in);
size_t cur_layer; // The index of the current layer.
size_t prev_layer; // The index of the previous layer.
size_t cur_weinum; // The index of the current weight matrix.
size_t cur_layer_units; // The number of units in the current layer.
size_t prev_layer_units; // The number of units in the previous layer.
size_t cur_layer_size; // The size of the current layer.
float* cur_layer_x; // Input to the current layer non-linearity.
float* cur_layer_y; // Output from the current layer
// non-linearity.
float* prev_layer_y; // Output from the previous non-linearity.
float* cur_layer_bias; // Biases for the current layer.
float* cur_weights; // Weights inputting to the current layer.
// Iterate over all of the layers except the input. This is just one
// iteration for 2-layer MLPs.
// Note that layer index starts at 0 for inputlayer, so we start at 1.
for (cur_layer=1; cur_layer<n_layers; cur_layer++)
{
prev_layer = cur_layer - 1;
cur_weinum = cur_layer - 1;
cur_layer_units = layer_units[cur_layer];
prev_layer_units = layer_units[prev_layer];
cur_layer_size = cur_layer_units * n_frames;
cur_layer_x = dev.layer_x[cur_layer];
cur_layer_y = dev.layer_y[cur_layer];
if (cur_layer==1)
prev_layer_y = dev.in;
else if (cur_layer == n_layers - 1) //cw564 - mbt
{
float * h_cache_prev_layer_y = dev.cache_raw_last2_y;
float * d_prev_layer_y = dev.layer_y[prev_layer];
fromdev_vf_vf("mbt.ori_prev_layer_y", prev_layer_units * n_frames,
d_prev_layer_y, h_cache_prev_layer_y);
int old_prev_layer_units = prev_layer_units;
prev_layer_units /= num_basis;
float * h_wsum_prev_layer_y = new float[prev_layer_units * n_frames];
//TODO fast summation
for (int ff = 0; ff < n_frames; ++ ff)
{
for (int now_dim = 0; now_dim < prev_layer_units; ++ now_dim)
{
int new_id = ff * prev_layer_units + now_dim;
h_wsum_prev_layer_y[new_id] = 0;
for (int bb = 0; bb < num_basis; ++ bb)
{
int old_id = ff * old_prev_layer_units + bb * prev_layer_units + now_dim;
h_wsum_prev_layer_y[new_id] += h_cache_prev_layer_y[old_id] * spkr_wgt[ff][bb];
}
}
}
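// In effect, for the basis-weighted combination above: with D =
// prev_layer_units per basis and lambda = spkr_wgt[ff],
//   h_wsum[ff][d] = sum over bb of lambda[bb] * y[ff][bb*D + d],
// i.e. the num_basis copies of the penultimate layer are collapsed into a
// single speaker-weighted activation before the final weight multiply.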
todev_vf_vf("mbt.new_prev_layer_y", prev_layer_units * n_frames,
h_wsum_prev_layer_y, d_prev_layer_y);
delete [] h_wsum_prev_layer_y;
prev_layer_y = d_prev_layer_y;
}
else
prev_layer_y = dev.layer_y[prev_layer];
cur_layer_bias = dev.layer_bias[cur_layer];
cur_weights = dev.weights[cur_weinum];
if (checking)
devcheck("forward_bunch #1");
//cz277 - fast softmax
qn_dev_fastcopy_vf_mf(n_frames, cur_layer_units, cur_layer_bias, cur_layer_x);
//qn_dev_copy_vf_mf(n_frames, cur_layer_units, cur_layer_bias,
// cur_layer_x);
if (checking)
devcheck("forward_bunch #2");
qn_dev_mulntacc_mfmf_mf(n_frames, prev_layer_units, cur_layer_units,
prev_layer_y, cur_weights,
cur_layer_x);
if (checking)
devcheck("forward_bunch #3");
// Check if we are doing things differently for the final layer.
if (cur_layer!=n_layers - 1)
{
//cz277 - nonlinearity, mul actv
switch(hiddenlayer_types[cur_layer])
{
case QN_LAYER_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SIGMOID:
qn_dev_sigmoid_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SOFTMAX:
qn_dev_multisoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_TANH:
qn_dev_tanh_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SOFTSIGN:
qn_dev_softsign_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
default:
//cz277 - debug
/*printf("curlayer = %d, curtype = %d, linear = %d, sigmoid = %d, softmax = %d, tanh = %d, softsign = %d\n", cur_layer, hiddenlayer_types[cur_layer], QN_LAYER_LINEAR, QN_LAYER_SIGMOID, QN_LAYER_SOFTMAX, QN_LAYER_TANH, QN_LAYER_SOFTSIGN);*/
assert(0);
}
}
else
{
//cz277 - nonlinear //cz277 - criteria
switch(out_layer_type)
{
case QN_OUTPUT_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SIGMOID:
qn_dev_sigmoid_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SOFTMAX:
//qn_dev_multisoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, dev.out);
//cz277 - fast softmax
qn_dev_fastsoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, dev.compcache, dev.out);
/*fromdev_vf_vf("forward_bunch().out", out_size, cur_layer_x, out);
printf("outsize = %d, in = %e, ", out_size, out[0]);
fromdev_vf_vf("forward_bunch().out", out_size, dev.compcache, out);
fromdev_vf_vf("forward_bunch().out", out_size, dev.out, out);
printf("out = %e\n", out[0]);*/
break;
case QN_OUTPUT_TANH:
qn_dev_tanh_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SOFTSIGN:
qn_dev_softsign_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
default:
assert(0);
}
}
}
// Copy the data back from the device
fromdev_vf_vf("forward_bunch().out", out_size, dev.out, out);
if (checking)
devcheck("forward_bunch #9");
}
void
QN_MLP_BunchCudaVar::train_bunch(size_t n_frames, const float *in,
const float* target, float* out, const float * * spkr_wgt, const size_t num_basis)
{
// First move forward, which copies over in and out
forward_bunch(n_frames, in, out, spkr_wgt, num_basis);
if (checking)
devcheck("train_bunch #0");
// So we still have to copy across targ
int out_size = n_frames * layer_units[n_layers-1];
todev_vf_vf("train_bunch().targ", out_size, target, dev.targ);
if (checking)
devcheck("train_bunch #1");
size_t cur_layer; // The index of the current layer.
size_t prev_layer; // The index of the previous layer.
size_t cur_weinum; // The index of the current weight matrix.
size_t cur_layer_units; // The number of units in the current layer.
size_t prev_layer_units; // The number of units in the previous layer.
size_t cur_layer_size; // The size of the current layer.
float* cur_layer_y; // Output from the current layer
// non-linearity.
const float* prev_layer_y; // Output from the previous non-linearity.
float* cur_layer_dydx; // dydx for the current layer.
float* cur_layer_dedy; // dedy for the current layer.
float* prev_layer_dedy; // dedy for the previous layer.
float* cur_layer_dedx; // dedx for the current layer.
float* cur_layer_bias; // Biases for the current layer.
float* cur_layer_delta_bias; // Delta biases for the current layer.
float* cur_weights; // Weights inputting to the current layer.
//cz277 - momentum
float *cur_weights_delta;
//cz277 - revisit momentum
float *cur_l_bias_delta;
//cz277 - nn fea bp
int bp_num_layer_idx = this->bp_num_layer;
//cw564 - mbt
float * z_spkr = new float[n_frames];
for (int ff = 0; ff < n_frames; ++ ff)
{
z_spkr[ff] = 0;
for (int bb = 0; bb < num_basis; ++ bb) z_spkr[ff] += spkr_wgt[ff][bb];
}
// Iterate back over all layers but the first.
for (cur_layer=n_layers-1; cur_layer>0 && (bp_num_layer_idx != 0); cur_layer--, --bp_num_layer_idx) //cz277 - nn fea bp
{
prev_layer = cur_layer - 1;
cur_weinum = cur_layer - 1;
cur_layer_units = layer_units[cur_layer];
prev_layer_units = layer_units[prev_layer];
cur_layer_size = cur_layer_units * n_frames;
//cw564 - mbt
if (cur_layer == n_layers - 2)
{
todev_vf_vf("mbt.raw_cur_layer_last2_y", cur_layer_size,
dev.cache_raw_last2_y, dev.layer_y[cur_layer]);
}
cur_layer_y = dev.layer_y[cur_layer];
if (cur_layer==1)
prev_layer_y = dev.in;
else
prev_layer_y = dev.layer_y[prev_layer];
cur_layer_dydx = dev.layer_dydx[cur_layer];
cur_layer_dedy = dev.layer_dedy[cur_layer];
prev_layer_dedy = dev.layer_dedy[prev_layer];
cur_layer_dedx = dev.layer_dedx[cur_layer];
cur_layer_bias = dev.layer_bias[cur_layer];
cur_layer_delta_bias = dev.layer_delta_bias[cur_layer];
cur_weights = dev.weights[cur_weinum];
//cz277 - momentum
cur_weights_delta = dev.weights_delta[cur_weinum];
//cz277 - revisit momentum
cur_l_bias_delta = dev.l_bias_delta[cur_layer];
float cur_neg_weight_learnrate = neg_weight_learnrate[cur_weinum];
float cur_neg_bias_learnrate = neg_bias_learnrate[cur_layer];
if (cur_layer!=n_layers - 1 && backprop_weights[cur_weinum+1])
{
//cz277 - nonlinearity, mul actv
switch(hiddenlayer_types[cur_layer])
{
case QN_LAYER_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SIGMOID:
qn_dev_dsigmoid_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SOFTMAX:
qn_dev_dsoftmax_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_TANH:
qn_dev_dtanh_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SOFTSIGN:
qn_dev_dsoftsign_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
default:
assert(0);
}
}
else
{
//cz277 - nonlinearity //cz277 - criteria
// Going back through the output layer.
switch(out_layer_type)
{
case QN_OUTPUT_LINEAR:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx);
}
break;
case QN_OUTPUT_SIGMOID:
if (criterion_type == QN_CRITERION_QUADRATIC) {
// For a sigmoid layer, de/dx = de/dy . dy/dx
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsigmoid_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
}
break;
case QN_OUTPUT_SOFTMAX:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftmax_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
//qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
//qn_dev_dsoftmax_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
//qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
}
break;
case QN_OUTPUT_TANH:
if (criterion_type == QN_CRITERION_QUADRATIC) {
// tanh output layer very similar to sigmoid
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dtanh_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dtanh_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
//qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, cur_layer_dedy, cur_layer_dedx); //wll
}
break;
case QN_OUTPUT_SOFTSIGN:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftsign_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftsign_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
}
break;
default:
assert(0);
} // End of output layer type switch.
} // End of special output layer treatment.
// Back-propagate error through this layer.
if (cur_layer!=1 && backprop_weights[cur_weinum])
{
//cw564 - mbt
if (cur_layer == n_layers - 1)
prev_layer_units = prev_layer_units / num_basis;
qn_dev_mul_mfmf_mf(n_frames, cur_layer_units, prev_layer_units,
cur_layer_dedx, cur_weights, prev_layer_dedy);
//cw564 - mbt -- prev_layer_dedy
if (cur_layer == n_layers - 1)
{
int size = n_frames * prev_layer_units;
float * h_raw_prev_layer_dedy = new float[size];
float * h_new_prev_layer_dedy = new float[size * num_basis];
fromdev_vf_vf("raw_prev_layer_dedy", size, prev_layer_dedy, h_raw_prev_layer_dedy);
int cnt = 0;
for (int ff = 0; ff < n_frames; ++ ff)
{
for (int bb = 0; bb < num_basis; ++ bb)
{
for (int dd = 0; dd < prev_layer_units; ++ dd)
{
h_new_prev_layer_dedy[cnt] =
h_raw_prev_layer_dedy[ff * prev_layer_units + dd] * spkr_wgt[ff][bb] / z_spkr[ff];
cnt ++;
}
}
}
int new_size = size * num_basis;
todev_vf_vf("new_prev_layer_dedy", new_size, h_new_prev_layer_dedy, prev_layer_dedy);
delete [] h_raw_prev_layer_dedy;
delete [] h_new_prev_layer_dedy;
}
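// Editor's note: the host-side loop above fans de/dy for the basis-summed
// previous layer back out to the per-basis copies, scaling frame ff / basis bb
// by spkr_wgt[ff][bb] / z_spkr[ff] (the per-frame normalised basis weight)
// before back-propagation continues through the lower layers.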
if (checking)
devcheck("train_bunch #12");
}
// Update weights.
if (cur_neg_weight_learnrate!=0.0f)
{
//cz277 - momentum
qn_dev_multnacc2_fmfmf_mf(n_frames, cur_layer_units, prev_layer_units,
cur_neg_weight_learnrate, alpha_momentum, cur_layer_dedx,
prev_layer_y, cur_weights_delta);
//weights_delta[tau] = -eta * partial_E_div_partial_weights + alpha * weights_delta[tau - 1]
//cz277 - weight decay
qn_dev_mulacc_vff_vf(cur_layer_units * prev_layer_units, cur_weights, cur_neg_weight_learnrate * weight_decay_factor, cur_weights_delta); //weights_delta[tau] = -eta * nu * weights[tau] + weights_delta[tau]
qn_dev_mulacc_vff_vf(
cur_layer_units * prev_layer_units,
cur_weights_delta, 1.0,
cur_weights); //weights[tau + 1] = weights[tau] + weights_delta[tau]
//cw564 - mbt -- TODO BEGIN set zero to entries not in diag-blocks
if (cur_layer <= n_layers - 2)
{
int weight_size = cur_layer_units * prev_layer_units;
float * h_cur_weights = dev.cache_weights;
fromdev_vf_vf("weights", weight_size, cur_weights, h_cur_weights);
for (int i = 0; i < weight_size; ++ i)
{
int col = i % prev_layer_units;
int row = i / prev_layer_units;
int one_base_col_size = prev_layer_units / num_basis;
int one_base_row_size = cur_layer_units / num_basis;
int k = min(col / one_base_col_size, row / one_base_row_size);
int base_col = col - k * one_base_col_size;
int base_row = row - k * one_base_row_size;
if (base_col >= one_base_col_size || base_row >= one_base_row_size)
{
h_cur_weights[i] = 0;
}
}
todev_vf_vf("new_weights", weight_size, h_cur_weights, cur_weights);
}
//cw564 - mbt -- TODO END set zero to entries not in diag-blocks
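// Editor's note: the masking loop above keeps only the num_basis diagonal
// blocks of the weight matrix.  Flat index i maps to
// (row, col) = (i / prev_layer_units, i % prev_layer_units); any entry whose
// row-block index differs from its column-block index ends up with
// base_col >= one_base_col_size or base_row >= one_base_row_size and is
// zeroed on the host before the matrix is copied back to the device.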
if (checking)
devcheck("train_bunch #13");
}
// Update biases.
if (cur_neg_bias_learnrate!=0.0f)
{
qn_dev_sumcol_mf_vf(n_frames, cur_layer_units, cur_layer_dedx,
cur_layer_delta_bias);
//cz277 - revisit momentum
qn_dev_scale_fvf_vf(cur_layer_units, alpha_momentum, cur_l_bias_delta); //acquire alpha * bias_delta[tau - 1]
qn_dev_mulacc_vff_vf(cur_layer_units, cur_layer_delta_bias, cur_neg_bias_learnrate, cur_l_bias_delta); //bias_delta[tau] = alpha * bias_delta[tau - 1] + neg_eta * partial_E_div_partial_bias
//cz277 - weight decay
qn_dev_mulacc_vff_vf(cur_layer_units, cur_layer_bias, cur_neg_weight_learnrate * weight_decay_factor, cur_l_bias_delta); //bias_delta[tau] = -eta * nu * bias[tau] + bias_delta[tau]
qn_dev_mulacc_vff_vf(cur_layer_units, cur_l_bias_delta, 1.0, cur_layer_bias); //bias[tau + 1] = bias[tau] + bias_delta[tau]
if (checking)
devcheck("train_bunch #15");
}
} // End of iteration over all layers.
// Copy the data back from the device
fromdev_vf_vf("train_bunch().out", out_size, dev.out, out);
if (checking)
devcheck("train_bunch #16");
}
void
QN_MLP_BunchCudaVar::forward(size_t n_frames, const float* in, float* out, const float * * wgt, const size_t num_basis)
{
refresh_dev_weights();
QN_MLP_BaseFl::forward(n_frames, in, out, wgt, num_basis);
}
void
QN_MLP_BunchCudaVar::train(size_t n_frames, const float* in,
const float* target, float* out, const float * * wgt, const size_t num_basis)
{
refresh_dev_weights();
QN_MLP_BaseFl::train(n_frames, in, target, out, wgt, num_basis);
host_weights_stale = QN_TRUE;
}
void
QN_MLP_BunchCudaVar::set_weights(enum QN_SectionSelector which,
size_t row, size_t col,
size_t n_rows, size_t n_cols,
const float* weights)
{
refresh_host_weights();
QN_MLP_BaseFl::set_weights(which, row, col, n_rows, n_cols, weights);
dev_weights_stale = QN_TRUE;
}
void
QN_MLP_BunchCudaVar::get_weights(enum QN_SectionSelector which,
size_t row, size_t col,
size_t n_rows, size_t n_cols,
float* weights)
{
refresh_host_weights();
QN_MLP_BaseFl::get_weights(which, row, col, n_rows, n_cols, weights);
}
void
QN_MLP_BunchCudaVar::refresh_dev_weights(void)
{
if (dev_weights_stale)
{
dev_weights_stale = QN_FALSE;
size_t i;
for (i = 0; i<n_weightmats; i++)
{
size_t n_weights;
n_weights = weights_size[i];
todev_vf_vf("refresh_dev_weights().weights",
n_weights, weights[i], dev.weights[i]);
//cz277 - momentum
todev_vf_vf("refresh_dev_weights().weights_delta",
n_weights, weights_delta[i], dev.weights_delta[i]);
}
for (i = 1; i<n_layers; i++)
{
size_t n_biases;
n_biases = layer_size[i];
todev_vf_vf("refresh_dev_weights().layer_bias",
n_biases, layer_bias[i], dev.layer_bias[i]);
//cz277 - revisit momentum
todev_vf_vf("refresh_dev_weights().l_bias_delta",
n_biases, bias_delta[i], dev.l_bias_delta[i]);
}
}
}
void
QN_MLP_BunchCudaVar::refresh_host_weights(void)
{
if (host_weights_stale)
{
host_weights_stale = QN_FALSE;
size_t i;
for (i = 0; i<n_weightmats; i++)
{
size_t n_weights;
n_weights = weights_size[i];
fromdev_vf_vf("refresh_host_weights.weights)",
n_weights, dev.weights[i], weights[i]);
//cz277 - momentum
fromdev_vf_vf("refresh_host_weights.weights_delta)",
n_weights, dev.weights_delta[i], weights_delta[i]);
}
for (i = 1; i<n_layers; i++)
{
size_t n_biases;
n_biases = layer_size[i];
fromdev_vf_vf("freresh_host_weights().layer_bias",
n_biases, dev.layer_bias[i], layer_bias[i]);
//cz277 - revisit momentum
fromdev_vf_vf("freresh_host_weights().bias_delta",
n_biases, dev.l_bias_delta[i], bias_delta[i]);
}
}
}
void
QN_MLP_BunchCudaVar::devnew_vf(const char* varname, int n, float **devptr)
{
cublasStatus e;
e = hipblasAlloc(n, sizeof(float), (void **) devptr);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device new_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_EPOCH, "Created CUDA float vec \"%s\" size %i at %.8x\n", varname, n, (unsigned long) *devptr);
}
void
QN_MLP_BunchCudaVar::devnew_vi(const char* varname, int n, int **devptr)
{
cublasStatus e;
e = hipblasAlloc(n, sizeof(int), (void **) devptr);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device new_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_EPOCH, "Created CUDA int vec \"%s\" size %i at %.8x\n", varname, n, (unsigned long) *devptr);
}
void
QN_MLP_BunchCudaVar::devcheck(const char* location)
{
hipError_t e;
e = hipDeviceSynchronize();
if (e!=hipSuccess)
{
clog.error("asynchronous CUDA error at %s - %s.",
location, hipGetErrorString(e));
}
cublasStatus eb;
eb = hipblasGetError();
if (eb!=HIPBLAS_STATUS_SUCCESS)
QN_ERROR("QN_cuda_check", "accumulated cublas error detected");
}
void
QN_MLP_BunchCudaVar::devnew(const char* varname, int n, int size,
void **devptr)
{
cublasStatus e;
e = hipblasAlloc(n, size, devptr);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blasw device free error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree(const char* varname, const void* devptr)
{
cublasStatus e;
e = hipblasFree((void *)devptr); //cz277 - cuda
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree_vf(const char* varname, const float* devptr)
{
cublasStatus e;
e = hipblasFree((void *) devptr);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree_vi(const char* varname, const int* devptr)
{
cublasStatus e;
e = hipblasFree((void *) devptr);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::todev_vf_vf(const char* varname, int n, const float* from,
float* devto)
{
cublasStatus e;
e = hipblasSetVector(n, sizeof(float), from, 1, devto, 1);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas todev_vf_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i floats to device variable \"%s\" at address %.8x\n", n, varname, devto);
}
void
QN_MLP_BunchCudaVar::fromdev_vf_vf(const char* varname, int n,
const float* devfrom, float* to)
{
cublasStatus e;
e = hipblasGetVector(n, sizeof(float), devfrom, 1, to, 1);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas fromdev_vf_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i floats from device variable \"%s\" at address %.8x\n", n, varname, devfrom);
}
void
QN_MLP_BunchCudaVar::todev_vi_vi(const char* varname, int n,
const int* from, int* devto)
{
cublasStatus e;
e = hipblasSetVector(n, sizeof(int), from, 1, devto, 1);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas todev_vi_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i ints to device variable \"%s\" at address %.8x\n", n, varname, devto);
}
void
QN_MLP_BunchCudaVar::fromdev_vi_vi(const char* varname, int n,
const int* devfrom, int* to)
{
cublasStatus e;
e = hipblasGetVector(n, sizeof(int), devfrom, 1, to, 1);
if (e != HIPBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas fromdev_vi_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i ints from device variable \"%s\" at address %.8x\n", n, varname, devfrom);
}
|
b9c624c5a53433759b038ca0186dd30abb0f4782.cu
|
const char* QN_MLP_BunchCudaVar_rcsid =
"$Header: /u/drspeech/repos/quicknet2/QN_MLP_BunchCudaVar.cu,v 1.5 2011/05/24 02:03:14 davidj Exp $";
/* Must include the config.h file first */
#include <QN_config.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include "QN_types.h"
#include "QN_Logger.h"
#include "QN_CudaUtils.h"
#include "QN_MLP_BunchCudaVar.h"
#include "QN_fltvec.h"
#include "QN_intvec.h"
#include "QN_cuvec.h"
#include <cuda.h>
#include <cublas.h>
// These appear later but we do not want them in the header file
// __global__ void QN_BunchCudaVar_forward_bunch(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
// __global__ void QN_BunchCudaVar_train_bunch(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
// __device__ void QN_BunchCudaVar_forward_bunch_do(QN_BunchCudaVar_Workspace *ws,
// int n_frames);
float * QN_MLP_BunchCudaVar::Last2_weights()
{
fromdev_vf_vf("weights", 1000 * 2698, dev.weights[n_layers - 1], dev.cache_last2_weights); //cw564 - mbt -- TODO
return dev.cache_last2_weights;
}
float * QN_MLP_BunchCudaVar::Last_y()
{
return dev.cache_raw_last2_y;
}
QN_MLP_BunchCudaVar::QN_MLP_BunchCudaVar(int a_bp_num_layer, //cz277 - nn fea bp
int a_debug,
const char* a_dbgname,
size_t a_n_layers,
const size_t a_layer_units[QN_MLP_MAX_LAYERS],
enum QN_CriterionType a_criteriontype, //cz277 - criteria
enum QN_LayerType *a_layertype, //cz277 - nonlinearity, mul actv
enum QN_OutputLayerType a_outtype,
size_t a_size_bunch,
int device_no, //cz277 - device select
const char *env_var4dev_id) //cz277 - env var
: QN_MLP_BaseFl(a_debug, a_dbgname, "QN_MLP_BunchCudaVar",
a_size_bunch, a_n_layers,
a_layer_units), //cz277 - dnn
bp_num_layer(a_bp_num_layer), //cz277 - nn fea bp
criterion_type(a_criteriontype), //cz277 - criteria
hiddenlayer_types(a_layertype), //cz277 - nonlinearity, mul actv
out_layer_type(a_outtype)
{
size_t i;
// Initialize CUDA if it has not happened already
QN_cuda_init(device_no, env_var4dev_id); //cz277 - device select, env var
// Some stuff so that when things go wrong it is more obvious.
// for (i=0; i<MAX_LAYERS; i++)
// {
// layer_x[i] = NULL;
// layer_y[i] = NULL;
// layer_dedy[i] = NULL;
// layer_dydx[i] = NULL;
// layer_dedx[i] = NULL;
// layer_delta_bias[i] = NULL;
// }
//cz277 - criteria
switch(criterion_type)
{
case QN_CRITERION_QUADRATIC:
case QN_CRITERION_XENTROPY:
break;
default:
clog.error("Failed to create an MLP with an invalid training criterion type.");
}
//cz277 - debug
/*for (int i = 1; i < a_n_layers; ++i)
printf("layer %d, srcaddr = %x, srcval = %d, dstaddr = %x, dstval = %d\n", i, a_layertype, a_layertype[2], hiddenlayer_types, hiddenlayer_types[2]);*/
//cz277 - nonlinearity, mul actv
/*switch(hiddenlayer_type)
{
case QN_LAYER_LINEAR:
case QN_LAYER_SIGMOID:
case QN_LAYER_SOFTMAX:
case QN_LAYER_TANH:
case QN_LAYER_SOFTSIGN:
break;
default:
clog.error("Failed to create an MLP with an invalid hidden layer out type.");
}*/
// Maybe we do not support all output layer types
switch(out_layer_type)
{
case QN_OUTPUT_SIGMOID:
//case QN_OUTPUT_SIGMOID_XENTROPY: //cz277 - nonlinearity
case QN_OUTPUT_TANH:
case QN_OUTPUT_SOFTMAX:
case QN_OUTPUT_LINEAR:
case QN_OUTPUT_SOFTSIGN: //cz277 - nonlinearity
break;
default:
clog.error("Failed to create an MLP with an invalid output layer type.");
}
if (size_bunch == 0)
clog.error("Cannot use a 0 bunch size.");
// Allocate device data structures
size_t in_size = layer_size[0];
size_t out_size = layer_size[n_layers-1];
devnew_vf("in", in_size, &(dev.in));
devnew_vf("out", out_size, &(dev.out));
devnew_vf("targ", out_size, &(dev.targ));
//cz277 - fast softmax
devnew_vf("compcache", out_size, &(dev.compcache));
for (i = 1; i<n_layers; i++)
{
size_t size = layer_size[i];
size_t units = layer_units[i];
devnew_vf("layer_bias", size, &(dev.layer_bias[i]));
devnew_vf("layer_y", size, &(dev.layer_y[i]));
devnew_vf("layer_x", size, &(dev.layer_x[i]));
devnew_vf("layer_dedy", size, &(dev.layer_dedy[i]));
devnew_vf("layer_dydx", size, &(dev.layer_dydx[i]));
devnew_vf("layer_dedx", size, &(dev.layer_dedx[i]));
devnew_vf("layer_delta_bias", units, &(dev.layer_delta_bias[i]));
//cz277 - revisit momentum
devnew_vf("l_bias_delta", size, &(dev.l_bias_delta[i]));
}
// Set up the per-weight-matrix data structures.
for (i = 0; i<n_weightmats; i++)
{
// Note the host weights are allocated by QN_MLP_BaseFl
size_t n_weights = weights_size[i];
// Allocate device data structures
devnew_vf("weights", n_weights, &dev.weights[i]);
//cz277 - momentum
devnew_vf("weights_delta", n_weights, &dev.weights_delta[i]);
}
clog.log(QN_LOG_PER_RUN, "Created net with %lu layers, bunchsize %lu.",
n_layers, size_bunch);
for (i=0; i<n_layers; i++)
{
clog.log(QN_LOG_PER_RUN, "Layer %lu has %lu units.",
i+1, layer_units[i]);
}
dev_weights_stale = QN_TRUE;
host_weights_stale = QN_FALSE;
dev.cache_raw_last2_y = new float[300000000]; //cw564 - mbt -- TODO
dev.cache_weights = new float[300000000]; //cw564 - mbt -- TODO
dev.cache_last2_weights = new float[300000000];
}
QN_MLP_BunchCudaVar::~QN_MLP_BunchCudaVar()
{
size_t i;
QN_cuda_check();
// Wind down the per-weight-matrix data structures.
for (i = 0; i<n_weightmats; i++)
{
// Deallocate device data structures
devfree_vf("weights", dev.weights[i]);
//cz277 - momentum
devfree_vf("weights_delta", dev.weights_delta[i]);
// Note the host weights are deallocated by QN_MLP_BaseFl
}
// Wind down the per-layer data structures.
for (i = 1; i<n_layers; i++)
{
// delete [] layer_y[i];
// delete [] layer_delta_bias[i];
// delete [] layer_dedx[i];
// delete [] layer_dydx[i];
// delete [] layer_dedy[i];
// delete [] layer_x[i];
// Note the host biases are deallocated by QN_MLP_BaseFl
devfree_vf("layer_delta_bias", dev.layer_delta_bias[i]);
devfree_vf("layer_dedx", dev.layer_dedx[i]);
devfree_vf("layer_dydx", dev.layer_dydx[i]);
devfree_vf("layer_dedy", dev.layer_dedy[i]);
devfree_vf("layer_x", dev.layer_x[i]);
devfree_vf("layer_y", dev.layer_y[i]);
devfree_vf("layer_bias", dev.layer_bias[i]);
//cz277 - revisit momentum
devfree_vf("l_bias_delta", dev.l_bias_delta[i]);
}
devfree_vf("targ", dev.targ);
devfree_vf("out", dev.out);
devfree_vf("in", dev.in);
//cz277 - fast softmax
devfree_vf("compcache", dev.compcache);
//cw564 - mbt
delete [] dev.cache_raw_last2_y;
delete [] dev.cache_weights;
delete [] dev.cache_last2_weights; // also free the third cache allocated in the constructor
}
void
QN_MLP_BunchCudaVar::forward_bunch(size_t n_frames, const float* in, float* out, const float * * spkr_wgt, const size_t num_basis)
{
// printf("in=%x, out=%x\n", in, out);
// Copy the data across to the device
int in_size = n_frames * layer_units[0];
int out_size = n_frames * layer_units[n_layers-1];
todev_vf_vf("forward_bunch().in", in_size, in, dev.in);
size_t cur_layer; // The index of the current layer.
size_t prev_layer; // The index of the previous layer.
size_t cur_weinum; // The index of the current weight matrix.
size_t cur_layer_units; // The number of units in the current layer.
size_t prev_layer_units; // The number of units in the previous layer.
size_t cur_layer_size; // The size of the current layer.
float* cur_layer_x; // Input to the current layer non-linearity.
float* cur_layer_y; // Output from the current layer
// non-linearity.
float* prev_layer_y; // Output from the previous non-linearity.
float* cur_layer_bias; // Biases for the current layer.
float* cur_weights; // Weights inputting to the current layer.
// Iterate over all of the layers except the input. This is just one
// iteration for 2-layer MLPs.
// Note that the layer index starts at 0 for the input layer, so we start at 1.
for (cur_layer=1; cur_layer<n_layers; cur_layer++)
{
prev_layer = cur_layer - 1;
cur_weinum = cur_layer - 1;
cur_layer_units = layer_units[cur_layer];
prev_layer_units = layer_units[prev_layer];
cur_layer_size = cur_layer_units * n_frames;
cur_layer_x = dev.layer_x[cur_layer];
cur_layer_y = dev.layer_y[cur_layer];
if (cur_layer==1)
prev_layer_y = dev.in;
else if (cur_layer == n_layers - 1) //cw564 - mbt
{
float * h_cache_prev_layer_y = dev.cache_raw_last2_y;
float * d_prev_layer_y = dev.layer_y[prev_layer];
fromdev_vf_vf("mbt.ori_prev_layer_y", prev_layer_units * n_frames,
d_prev_layer_y, h_cache_prev_layer_y);
int old_prev_layer_units = prev_layer_units;
prev_layer_units /= num_basis;
float * h_wsum_prev_layer_y = new float[prev_layer_units * n_frames];
//TODO fast summation
for (int ff = 0; ff < n_frames; ++ ff)
{
for (int now_dim = 0; now_dim < prev_layer_units; ++ now_dim)
{
int new_id = ff * prev_layer_units + now_dim;
h_wsum_prev_layer_y[new_id] = 0;
for (int bb = 0; bb < num_basis; ++ bb)
{
int old_id = ff * old_prev_layer_units + bb * prev_layer_units + now_dim;
h_wsum_prev_layer_y[new_id] += h_cache_prev_layer_y[old_id] * spkr_wgt[ff][bb];
}
}
}
todev_vf_vf("mbt.new_prev_layer_y", prev_layer_units * n_frames,
h_wsum_prev_layer_y, d_prev_layer_y);
delete [] h_wsum_prev_layer_y;
prev_layer_y = d_prev_layer_y;
}
else
prev_layer_y = dev.layer_y[prev_layer];
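// Editor's note (cw564 - mbt block above): for the last weight layer the
// per-basis outputs of the previous layer are collapsed on the host into a
// weighted sum, y_sum[ff][d] = sum_b spkr_wgt[ff][b] * y[ff][b * D + d]
// with D = prev_layer_units / num_basis, and the result overwrites
// d_prev_layer_y before the usual x = W * y + bias step below.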
cur_layer_bias = dev.layer_bias[cur_layer];
cur_weights = dev.weights[cur_weinum];
if (checking)
devcheck("forward_bunch #1");
//cz277 - fast softmax
qn_dev_fastcopy_vf_mf(n_frames, cur_layer_units, cur_layer_bias, cur_layer_x);
//qn_dev_copy_vf_mf(n_frames, cur_layer_units, cur_layer_bias,
// cur_layer_x);
if (checking)
devcheck("forward_bunch #2");
qn_dev_mulntacc_mfmf_mf(n_frames, prev_layer_units, cur_layer_units,
prev_layer_y, cur_weights,
cur_layer_x);
if (checking)
devcheck("forward_bunch #3");
// Check if we are doing things differently for the final layer.
if (cur_layer!=n_layers - 1)
{
//cz277 - nonlinearity, mul actv
switch(hiddenlayer_types[cur_layer])
{
case QN_LAYER_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SIGMOID:
qn_dev_sigmoid_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SOFTMAX:
qn_dev_multisoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_TANH:
qn_dev_tanh_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
case QN_LAYER_SOFTSIGN:
qn_dev_softsign_vf_vf(cur_layer_size, cur_layer_x, cur_layer_y);
break;
default:
//cz277 - debug
/*printf("curlayer = %d, curtype = %d, linear = %d, sigmoid = %d, softmax = %d, tanh = %d, softsign = %d\n", cur_layer, hiddenlayer_types[cur_layer], QN_LAYER_LINEAR, QN_LAYER_SIGMOID, QN_LAYER_SOFTMAX, QN_LAYER_TANH, QN_LAYER_SOFTSIGN);*/
assert(0);
}
}
else
{
//cz277 - nonlinear //cz277 - criteria
switch(out_layer_type)
{
case QN_OUTPUT_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SIGMOID:
qn_dev_sigmoid_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SOFTMAX:
//qn_dev_multisoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, dev.out);
//cz277 - fast softmax
qn_dev_fastsoftmax_mf_mf(n_frames, cur_layer_units, cur_layer_x, dev.compcache, dev.out);
/*fromdev_vf_vf("forward_bunch().out", out_size, cur_layer_x, out);
printf("outsize = %d, in = %e, ", out_size, out[0]);
fromdev_vf_vf("forward_bunch().out", out_size, dev.compcache, out);
fromdev_vf_vf("forward_bunch().out", out_size, dev.out, out);
printf("out = %e\n", out[0]);*/
break;
case QN_OUTPUT_TANH:
qn_dev_tanh_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
case QN_OUTPUT_SOFTSIGN:
qn_dev_softsign_vf_vf(cur_layer_size, cur_layer_x, dev.out);
break;
default:
assert(0);
}
}
}
// Copy the data back from the device
fromdev_vf_vf("forward_bunch().out", out_size, dev.out, out);
if (checking)
devcheck("forward_bunch #9");
}
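// --------------------------------------------------------------------------
// Editor's note: the function below is an illustrative host-side sketch
// (never called anywhere) of the per-layer step that the bias copy and the
// mulntacc call in forward_bunch() perform on the device: x = W * y_prev + b.
// It assumes a row-major [n_frames x units] activation layout and weights
// stored as [cur_units x prev_units]; the real QuickNet kernels may use a
// different layout, so treat this only as documentation.
static void qn_ref_forward_layer(size_t n_frames, size_t prev_units,
    size_t cur_units, const float* prev_y,
    const float* weights, const float* bias, float* cur_x)
{
    for (size_t f = 0; f < n_frames; f++) {
        for (size_t u = 0; u < cur_units; u++) {
            float x = bias[u];                       // x starts at the bias
            for (size_t p = 0; p < prev_units; p++)  // accumulate W * y_prev
                x += weights[u * prev_units + p] * prev_y[f * prev_units + p];
            cur_x[f * cur_units + u] = x;            // the layer non-linearity f(x) is applied next
        }
    }
}
// --------------------------------------------------------------------------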
void
QN_MLP_BunchCudaVar::train_bunch(size_t n_frames, const float *in,
const float* target, float* out, const float * * spkr_wgt, const size_t num_basis)
{
// First move forward, which copies over in and out
forward_bunch(n_frames, in, out, spkr_wgt, num_basis);
if (checking)
devcheck("train_bunch #0");
// So we still have to copy across targ
int out_size = n_frames * layer_units[n_layers-1];
todev_vf_vf("train_bunch().targ", out_size, target, dev.targ);
if (checking)
devcheck("train_bunch #1");
size_t cur_layer; // The index of the current layer.
size_t prev_layer; // The index of the previous layer.
size_t cur_weinum; // The index of the current weight matrix.
size_t cur_layer_units; // The number of units in the current layer.
size_t prev_layer_units; // The number of units in the previous layer.
size_t cur_layer_size; // The size of the current layer.
float* cur_layer_y; // Output from the current layer
// non-linearity.
const float* prev_layer_y; // Output from the previous non-linearity.
float* cur_layer_dydx; // dydx for the current layer.
float* cur_layer_dedy; // dedy for the current layer.
float* prev_layer_dedy; // dedy for the previous layer.
float* cur_layer_dedx; // dedx for the current layer.
float* cur_layer_bias; // Biases for the current layer.
float* cur_layer_delta_bias; // Delta biases for the current layer.
float* cur_weights; // Weights inputting to the current layer.
//cz277 - momentum
float *cur_weights_delta;
//cz277 - revisit momentum
float *cur_l_bias_delta;
//cz277 - nn fea bp
int bp_num_layer_idx = this->bp_num_layer;
//cw564 - mbt
float * z_spkr = new float[n_frames];
for (int ff = 0; ff < n_frames; ++ ff)
{
z_spkr[ff] = 0;
for (int bb = 0; bb < num_basis; ++ bb) z_spkr[ff] += spkr_wgt[ff][bb];
}
// Iterate back over all layers but the first.
for (cur_layer=n_layers-1; cur_layer>0 && (bp_num_layer_idx != 0); cur_layer--, --bp_num_layer_idx) //cz277 - nn fea bp
{
prev_layer = cur_layer - 1;
cur_weinum = cur_layer - 1;
cur_layer_units = layer_units[cur_layer];
prev_layer_units = layer_units[prev_layer];
cur_layer_size = cur_layer_units * n_frames;
//cw564 - mbt
if (cur_layer == n_layers - 2)
{
todev_vf_vf("mbt.raw_cur_layer_last2_y", cur_layer_size,
dev.cache_raw_last2_y, dev.layer_y[cur_layer]);
}
cur_layer_y = dev.layer_y[cur_layer];
if (cur_layer==1)
prev_layer_y = dev.in;
else
prev_layer_y = dev.layer_y[prev_layer];
cur_layer_dydx = dev.layer_dydx[cur_layer];
cur_layer_dedy = dev.layer_dedy[cur_layer];
prev_layer_dedy = dev.layer_dedy[prev_layer];
cur_layer_dedx = dev.layer_dedx[cur_layer];
cur_layer_bias = dev.layer_bias[cur_layer];
cur_layer_delta_bias = dev.layer_delta_bias[cur_layer];
cur_weights = dev.weights[cur_weinum];
//cz277 - momentum
cur_weights_delta = dev.weights_delta[cur_weinum];
//cz277 - revisit momentum
cur_l_bias_delta = dev.l_bias_delta[cur_layer];
float cur_neg_weight_learnrate = neg_weight_learnrate[cur_weinum];
float cur_neg_bias_learnrate = neg_bias_learnrate[cur_layer];
if (cur_layer!=n_layers - 1 && backprop_weights[cur_weinum+1])
{
//cz277 - nonlinearity, mul actv
switch(hiddenlayer_types[cur_layer])
{
case QN_LAYER_LINEAR:
qn_dev_copy_vf_vf(cur_layer_size, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SIGMOID:
qn_dev_dsigmoid_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SOFTMAX:
qn_dev_dsoftmax_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_TANH:
qn_dev_dtanh_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
case QN_LAYER_SOFTSIGN:
qn_dev_dsoftsign_vf_vf(cur_layer_size, cur_layer_y, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
break;
default:
assert(0);
}
}
else
{
//cz277 - nonlinearity //cz277 - criteria
// Going back through the output layer.
switch(out_layer_type)
{
case QN_OUTPUT_LINEAR:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx);
}
break;
case QN_OUTPUT_SIGMOID:
if (criterion_type == QN_CRITERION_QUADRATIC) {
// For a sigmoid layer, de/dx = de/dy . dy/dx
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsigmoid_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
}
break;
case QN_OUTPUT_SOFTMAX:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftmax_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedx); //dx = dy
//qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
//qn_dev_dsoftmax_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
//qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
}
break;
case QN_OUTPUT_TANH:
if (criterion_type == QN_CRITERION_QUADRATIC) {
// tanh output layer very similar to sigmoid
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dtanh_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dtanh_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
//qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, cur_layer_dedy, cur_layer_dedx); //wll
}
break;
case QN_OUTPUT_SOFTSIGN:
if (criterion_type == QN_CRITERION_QUADRATIC) {
qn_dev_sub_vfvf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftsign_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
} else { //xentropy
qn_dev_dxentropy_vf_vf(cur_layer_size, dev.out, dev.targ, cur_layer_dedy);
qn_dev_dsoftsign_vf_vf(cur_layer_size, dev.out, cur_layer_dydx);
qn_dev_mul_vfvf_vf(cur_layer_size, cur_layer_dydx, cur_layer_dedy, cur_layer_dedx);
}
break;
default:
assert(0);
} // End of output layer type switch.
} // End of special output layer treatment.
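// Editor's note: the switch above forms de/dx for the output layer.  With the
// quadratic criterion, de/dy = out - targ is computed first and then
// multiplied by the activation derivative dy/dx; with cross-entropy and a
// sigmoid or softmax output the two factors cancel analytically, which is why
// those branches write (out - targ) straight into cur_layer_dedx.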
// Back propagate error through this layer.
if (cur_layer!=1 && backprop_weights[cur_weinum])
{
//cw564 - mbt
if (cur_layer == n_layers - 1)
prev_layer_units = prev_layer_units / num_basis;
qn_dev_mul_mfmf_mf(n_frames, cur_layer_units, prev_layer_units,
cur_layer_dedx, cur_weights, prev_layer_dedy);
//cw564 - mbt -- prev_layer_dedy
if (cur_layer == n_layers - 1)
{
int size = n_frames * prev_layer_units;
float * h_raw_prev_layer_dedy = new float[size];
float * h_new_prev_layer_dedy = new float[size * num_basis];
fromdev_vf_vf("raw_prev_layer_dedy", size, prev_layer_dedy, h_raw_prev_layer_dedy);
int cnt = 0;
for (int ff = 0; ff < n_frames; ++ ff)
{
for (int bb = 0; bb < num_basis; ++ bb)
{
for (int dd = 0; dd < prev_layer_units; ++ dd)
{
h_new_prev_layer_dedy[cnt] =
h_raw_prev_layer_dedy[ff * prev_layer_units + dd] * spkr_wgt[ff][bb] / z_spkr[ff];
cnt ++;
}
}
}
int new_size = size * num_basis;
todev_vf_vf("new_prev_layer_dedy", new_size, h_new_prev_layer_dedy, prev_layer_dedy);
delete [] h_raw_prev_layer_dedy;
delete [] h_new_prev_layer_dedy;
}
if (checking)
devcheck("train_bunch #12");
}
// Update weights.
if (cur_neg_weight_learnrate!=0.0f)
{
//cz277 - momentum
qn_dev_multnacc2_fmfmf_mf(n_frames, cur_layer_units, prev_layer_units,
cur_neg_weight_learnrate, alpha_momentum, cur_layer_dedx,
prev_layer_y, cur_weights_delta);
//weights_delta[tau] = -eta * partial_E_div_partial_weights + alpha * weights_delta[tau - 1]
//cz277 - weight decay
qn_dev_mulacc_vff_vf(cur_layer_units * prev_layer_units, cur_weights, cur_neg_weight_learnrate * weight_decay_factor, cur_weights_delta); //weights_delta[tau] = -eta * nu * weights[tau] + weights_delta[tau]
qn_dev_mulacc_vff_vf(
cur_layer_units * prev_layer_units,
cur_weights_delta, 1.0,
cur_weights); //weights[tau + 1] = weights[tau] + weights_delta[tau]
//cw564 - mbt -- TODO BEGIN set zero to entries not in diag-blocks
if (cur_layer <= n_layers - 2)
{
int weight_size = cur_layer_units * prev_layer_units;
float * h_cur_weights = dev.cache_weights;
fromdev_vf_vf("weights", weight_size, cur_weights, h_cur_weights);
for (int i = 0; i < weight_size; ++ i)
{
int col = i % prev_layer_units;
int row = i / prev_layer_units;
int one_base_col_size = prev_layer_units / num_basis;
int one_base_row_size = cur_layer_units / num_basis;
int k = min(col / one_base_col_size, row / one_base_row_size);
int base_col = col - k * one_base_col_size;
int base_row = row - k * one_base_row_size;
if (base_col >= one_base_col_size || base_row >= one_base_row_size)
{
h_cur_weights[i] = 0;
}
}
todev_vf_vf("new_weights", weight_size, h_cur_weights, cur_weights);
}
//cw564 - mbt -- TODO END set zero to entries not in diag-blocks
if (checking)
devcheck("train_bunch #13");
}
// Update biases.
if (cur_neg_bias_learnrate!=0.0f)
{
qn_dev_sumcol_mf_vf(n_frames, cur_layer_units, cur_layer_dedx,
cur_layer_delta_bias);
//cz277 - revisit momentum
qn_dev_scale_fvf_vf(cur_layer_units, alpha_momentum, cur_l_bias_delta); //acquire alpha * bias_delta[tau - 1]
qn_dev_mulacc_vff_vf(cur_layer_units, cur_layer_delta_bias, cur_neg_bias_learnrate, cur_l_bias_delta); //bias_delta[tau] = alpha * bias_delta[tau - 1] + neg_eta * partial_E_div_partial_bias
//cz277 - weight decay
qn_dev_mulacc_vff_vf(cur_layer_units, cur_layer_bias, cur_neg_weight_learnrate * weight_decay_factor, cur_l_bias_delta); //bias_delta[tau] = -eta * nu * bias[tau] + bias_delta[tau]
qn_dev_mulacc_vff_vf(cur_layer_units, cur_l_bias_delta, 1.0, cur_layer_bias); //bias[tau + 1] = bias[tau] + bias_delta[tau]
if (checking)
devcheck("train_bunch #15");
}
} // End of iteration over all layers.
// Copy the data back from the device
fromdev_vf_vf("train_bunch().out", out_size, dev.out, out);
if (checking)
devcheck("train_bunch #16");
}
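// --------------------------------------------------------------------------
// Editor's note: an illustrative host-side sketch (never called) of the
// momentum + weight-decay update that the device calls in train_bunch()
// perform for both weights and biases, written for one flat parameter vector.
// neg_learnrate is the negated learning rate (-eta), matching the
// neg_weight_learnrate / neg_bias_learnrate convention of this class; all
// other names are hypothetical.
static void qn_ref_param_update(size_t n, float neg_learnrate,
    float alpha_momentum, float weight_decay,
    const float* grad, float* param, float* delta)
{
    for (size_t i = 0; i < n; i++) {
        // delta[tau] = alpha * delta[tau-1] - eta * dE/dw - eta * nu * w[tau]
        delta[i] = alpha_momentum * delta[i]
                 + neg_learnrate * grad[i]
                 + neg_learnrate * weight_decay * param[i];
        // w[tau+1] = w[tau] + delta[tau]
        param[i] += delta[i];
    }
}
// --------------------------------------------------------------------------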
void
QN_MLP_BunchCudaVar::forward(size_t n_frames, const float* in, float* out, const float * * wgt, const size_t num_basis)
{
refresh_dev_weights();
QN_MLP_BaseFl::forward(n_frames, in, out, wgt, num_basis);
}
void
QN_MLP_BunchCudaVar::train(size_t n_frames, const float* in,
const float* target, float* out, const float * * wgt, const size_t num_basis)
{
refresh_dev_weights();
QN_MLP_BaseFl::train(n_frames, in, target, out, wgt, num_basis);
host_weights_stale = QN_TRUE;
}
void
QN_MLP_BunchCudaVar::set_weights(enum QN_SectionSelector which,
size_t row, size_t col,
size_t n_rows, size_t n_cols,
const float* weights)
{
refresh_host_weights();
QN_MLP_BaseFl::set_weights(which, row, col, n_rows, n_cols, weights);
dev_weights_stale = QN_TRUE;
}
void
QN_MLP_BunchCudaVar::get_weights(enum QN_SectionSelector which,
size_t row, size_t col,
size_t n_rows, size_t n_cols,
float* weights)
{
refresh_host_weights();
QN_MLP_BaseFl::get_weights(which, row, col, n_rows, n_cols, weights);
}
void
QN_MLP_BunchCudaVar::refresh_dev_weights(void)
{
if (dev_weights_stale)
{
dev_weights_stale = QN_FALSE;
size_t i;
for (i = 0; i<n_weightmats; i++)
{
size_t n_weights;
n_weights = weights_size[i];
todev_vf_vf("refresh_dev_weights().weights",
n_weights, weights[i], dev.weights[i]);
//cz277 - momentum
todev_vf_vf("refresh_dev_weights().weights_delta",
n_weights, weights_delta[i], dev.weights_delta[i]);
}
for (i = 1; i<n_layers; i++)
{
size_t n_biases;
n_biases = layer_size[i];
todev_vf_vf("refresh_dev_weights().layer_bias",
n_biases, layer_bias[i], dev.layer_bias[i]);
//cz277 - revisit momentum
todev_vf_vf("refresh_dev_weights().l_bias_delta",
n_biases, bias_delta[i], dev.l_bias_delta[i]);
}
}
}
void
QN_MLP_BunchCudaVar::refresh_host_weights(void)
{
if (host_weights_stale)
{
host_weights_stale = QN_FALSE;
size_t i;
for (i = 0; i<n_weightmats; i++)
{
size_t n_weights;
n_weights = weights_size[i];
fromdev_vf_vf("refresh_host_weights.weights)",
n_weights, dev.weights[i], weights[i]);
//cz277 - momentum
fromdev_vf_vf("refresh_host_weights.weights_delta)",
n_weights, dev.weights_delta[i], weights_delta[i]);
}
for (i = 1; i<n_layers; i++)
{
size_t n_biases;
n_biases = layer_size[i];
fromdev_vf_vf("freresh_host_weights().layer_bias",
n_biases, dev.layer_bias[i], layer_bias[i]);
//cz277 - revisit momentum
fromdev_vf_vf("freresh_host_weights().bias_delta",
n_biases, dev.l_bias_delta[i], bias_delta[i]);
}
}
}
void
QN_MLP_BunchCudaVar::devnew_vf(const char* varname, int n, float **devptr)
{
cublasStatus e;
e = cublasAlloc(n, sizeof(float), (void **) devptr);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device new_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_EPOCH, "Created CUDA float vec \"%s\" size %i at %.8x\n", varname, n, (unsigned long) *devptr);
}
void
QN_MLP_BunchCudaVar::devnew_vi(const char* varname, int n, int **devptr)
{
cublasStatus e;
e = cublasAlloc(n, sizeof(int), (void **) devptr);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device new_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_EPOCH, "Created CUDA int vec \"%s\" size %i at %.8x\n", varname, n, (unsigned long) *devptr);
}
void
QN_MLP_BunchCudaVar::devcheck(const char* location)
{
cudaError_t e;
e = cudaThreadSynchronize();
if (e!=cudaSuccess)
{
clog.error("asynchronous CUDA error at %s - %s.",
location, cudaGetErrorString(e));
}
cublasStatus eb;
eb = cublasGetError();
if (eb!=CUBLAS_STATUS_SUCCESS)
QN_ERROR("QN_cuda_check", "accumulated cublas error detected");
}
void
QN_MLP_BunchCudaVar::devnew(const char* varname, int n, int size,
void **devptr)
{
cublasStatus e;
e = cublasAlloc(n, size, devptr);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blasw device free error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree(const char* varname, const void* devptr)
{
cublasStatus e;
e = cublasFree((void *)devptr); //cz277 - cuda
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree_vf(const char* varname, const float* devptr)
{
cublasStatus e;
e = cublasFree((void *) devptr);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::devfree_vi(const char* varname, const int* devptr)
{
cublasStatus e;
e = cublasFree((void *) devptr);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas device free_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
}
void
QN_MLP_BunchCudaVar::todev_vf_vf(const char* varname, int n, const float* from,
float* devto)
{
cublasStatus e;
e = cublasSetVector(n, sizeof(float), from, 1, devto, 1);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas todev_vf_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i floats to device variable \"%s\" at address %.8x\n", n, varname, devto);
}
void
QN_MLP_BunchCudaVar::fromdev_vf_vf(const char* varname, int n,
const float* devfrom, float* to)
{
cublasStatus e;
e = cublasGetVector(n, sizeof(float), devfrom, 1, to, 1);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas fromdev_vf_vf error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i floats from device variable \"%s\" at address %.8x\n", n, varname, devfrom);
}
void
QN_MLP_BunchCudaVar::todev_vi_vi(const char* varname, int n,
const int* from, int* devto)
{
cublasStatus e;
e = cublasSetVector(n, sizeof(int), from, 1, devto, 1);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas todev_vi_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i ints to device variable \"%s\" at address %.8x\n", n, varname, devto);
}
void
QN_MLP_BunchCudaVar::fromdev_vi_vi(const char* varname, int n,
const int* devfrom, int* to)
{
cublasStatus e;
e = cublasGetVector(n, sizeof(int), devfrom, 1, to, 1);
if (e != CUBLAS_STATUS_SUCCESS)
{
clog.error("cuda blas fromdev_vi_vi error variable %s - %s.",
varname, QN_cublas_error_string(e));
}
clog.log(QN_LOG_PER_BUNCH, "Copied %i ints from device variable \"%s\" at address %.8x\n", n, varname, devfrom);
}
|
dadd6a207a7008ddbe999da37fcb552c38c589ba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
// prac == 1 : practice 2
// prac == 2 : practice 3
#define prac 2
#define CUDA_CALL(x) { const hipError_t a = (x); if(a != hipSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", hipGetErrorString(a), a, __LINE__); hipDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
hipEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(hipEventCreate(&cuda_timer_start));
CUDA_CALL(hipEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(hipEventDestroy(cuda_timer_start));
CUDA_CALL(hipEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
hipEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
hipEventRecord(cuda_timer_stop, CUDA_STREAM_0);
hipEventSynchronize(cuda_timer_stop);
hipEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
#if prac==1
typedef struct {
int width;
int height;
float *elements;
} Array;
#define MAX_N_ELEMENTS (1 << 20)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float *x, float *y, float *z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id]) + cos(A.elements[id])*sin(B.elements[id]));
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 16;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width = 1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***GPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
hipError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
return 1;
}
return 0;
}
hipError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
// Think about how to measure the elapsed time as accurately as possible using the timer functions below.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}///////////// if(cu..... ==CUDA_CALL
Array d_A, d_B, d_C;
size_t size;
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_A.elements, size))
CUDA_CALL(hipMemcpy(d_A.elements, A.elements, size, hipMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_B.elements, size))
CUDA_CALL(hipMemcpy(d_B.elements, B.elements, size, hipMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(hipMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CombineTwoArrraysKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
CUDA_CALL(hipGetLastError())
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(hipDeviceSynchronize())
CUDA_CALL(hipMemcpy(C.elements, d_C.elements, size, hipMemcpyDeviceToHost))
Error:
hipFree(d_A.elements);
hipFree(d_B.elements);
hipFree(d_C.elements);
return cudaStatus;
}
#endif
#if prac==2
int n;
#define BLOCK_SIZE 32
#define ELEMENT_SIZE (1<<10)
const int ELEM_PER_VECTOR = 32;
float(*pVecX), (*pVecY), (*pVecY_G);
float(*pMatA);
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
fread(&n, sizeof(int), 1, fp); // n is an int; init_data() writes it with sizeof(int)
pVecX = new float[n * ELEM_PER_VECTOR];
pVecY = new float[n * ELEM_PER_VECTOR];
pVecY_G = new float[n * ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR * ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int vec_idx, i, j;
for (vec_idx = 0; vec_idx < ELEMENT_SIZE; vec_idx++) {
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += pMatA[i * ELEM_PER_VECTOR + j] * pVecX[vec_idx * ELEM_PER_VECTOR + j];
}
pVecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
}
__global__ void Mat_Vec_Multiply_Kernel(float *d_VecY, float *d_VecX, float *d_MatA, int Vec_Size)
{
int vec_idx = blockIdx.x * blockDim.x + threadIdx.x ;
for (int i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (int j = 0; j < ELEM_PER_VECTOR; j++) {
sum += d_MatA[i * ELEM_PER_VECTOR + j] * d_VecX[vec_idx * ELEM_PER_VECTOR + j];
}
d_VecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
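// Editor's note: in the kernel above each thread owns one input vector
// (vec_idx) and serially produces all ELEM_PER_VECTOR outputs for it, so
// Mat_Vec_Multiply_GPU() below only needs a 1-D launch of n / BLOCK_SIZE
// blocks with BLOCK_SIZE threads each.  Neighbouring threads read d_VecX with
// a stride of ELEM_PER_VECTOR floats, so these loads are not coalesced.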
void Mat_Vec_Multiply_GPU(float *p_VecX, float *p_MatA, float *p_VecY_G)
{
// Think about how to measure the elapsed time as accurately as possible using the timer functions below.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
hipError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?");
}///////////// if(cu..... ==CUDA_CALL
float *d_VecX, *d_MatA, *d_VecY_G;
size_t size;
size = ELEM_PER_VECTOR * n * sizeof(float);
CUDA_CALL(hipMalloc(&d_VecX, size));
CUDA_CALL(hipMemcpy(d_VecX, p_VecX, size, hipMemcpyHostToDevice));
CUDA_CALL(hipMalloc(&d_VecY_G, size));
size = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(hipMalloc(&d_MatA, size));
CUDA_CALL(hipMemcpy(d_MatA, p_MatA, size, hipMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(n / BLOCK_SIZE);
Mat_Vec_Multiply_Kernel<<<dimGrid, dimBlock>>>(d_VecY_G, d_VecX, d_MatA, ELEM_PER_VECTOR);
CUDA_CALL(hipMemcpy(p_VecY_G, d_VecY_G, ELEM_PER_VECTOR * n * sizeof(float), hipMemcpyDeviceToHost));
CUDA_CALL(hipFree(d_VecX));
CUDA_CALL(hipFree(d_VecY_G));
CUDA_CALL(hipFree(d_MatA));
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
}
void init_data(int size) {
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&size, sizeof(int), 1, fp);
int i, j;
float x;
for (i = 0; i < size; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f*((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
for (i = 0; i < ELEM_PER_VECTOR; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f*((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
fclose(fp);
return;
}
int main()
{
init_data(ELEMENT_SIZE);
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0 * ELEM_PER_VECTOR + 0], compute_time);
Mat_Vec_Multiply_GPU(pVecX, pMatA, pVecY_G);
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0 * ELEM_PER_VECTOR + 0], device_time);
int i;
for (i = 0; i < ELEMENT_SIZE * ELEM_PER_VECTOR; i++) {
if (fabs(pVecY[i] - pVecY_G[i]) > 0.001) {
printf("Kernel execution fail!!\n\n");
break;
}
}
}
#endif
|
dadd6a207a7008ddbe999da37fcb552c38c589ba.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<stdlib.h>
#include <math.h>
#include <Windows.h>
#include <time.h>
#include <assert.h>
// prac == 1 : practice 2
// prac == 2 : practice 3
#define prac 2
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
typedef float TIMER_T;
#define USE_CPU_TIMER 1
#define USE_GPU_TIMER 1
#if USE_CPU_TIMER == 1
__int64 start, freq, end;
#define CHECK_TIME_START { QueryPerformanceFrequency((LARGE_INTEGER*)&freq); QueryPerformanceCounter((LARGE_INTEGER*)&start); }
#define CHECK_TIME_END(a) { QueryPerformanceCounter((LARGE_INTEGER*)&end); a = (float)((float)(end - start) / (freq / 1000.0f)); }
#else
#define CHECK_TIME_START
#define CHECK_TIME_END(a)
#endif
#if USE_GPU_TIMER == 1
cudaEvent_t cuda_timer_start, cuda_timer_stop;
#define CUDA_STREAM_0 (0)
void create_device_timer()
{
CUDA_CALL(cudaEventCreate(&cuda_timer_start));
CUDA_CALL(cudaEventCreate(&cuda_timer_stop));
}
void destroy_device_timer()
{
CUDA_CALL(cudaEventDestroy(cuda_timer_start));
CUDA_CALL(cudaEventDestroy(cuda_timer_stop));
}
inline void start_device_timer()
{
cudaEventRecord(cuda_timer_start, CUDA_STREAM_0);
}
inline TIMER_T stop_device_timer()
{
TIMER_T ms;
cudaEventRecord(cuda_timer_stop, CUDA_STREAM_0);
cudaEventSynchronize(cuda_timer_stop);
cudaEventElapsedTime(&ms, cuda_timer_start, cuda_timer_stop);
return ms;
}
#define CHECK_TIME_INIT_GPU() { create_device_timer(); }
#define CHECK_TIME_START_GPU() { start_device_timer(); }
#define CHECK_TIME_END_GPU(a) { a = stop_device_timer(); }
#define CHECK_TIME_DEST_GPU() { destroy_device_timer(); }
#else
#define CHECK_TIME_INIT_GPU()
#define CHECK_TIME_START_GPU()
#define CHECK_TIME_END_GPU(a)
#define CHECK_TIME_DEST_GPU()
#endif
TIMER_T compute_time = 0;
TIMER_T device_time = 0;
#if prac==1
typedef struct {
int width;
int height;
float *elements;
} Array;
#define MAX_N_ELEMENTS (1 << 20)
void generate_random_float_array(float *array, int n) {
int i;
for (i = 0; i < n; i++) {
array[i] = 3.1415926f*((float)rand() / RAND_MAX);
}
}
void combine_two_arrays(float *x, float *y, float *z, int n) {
int i;
for (i = 0; i < n; i++) {
z[i] = 1.0f / (sin(x[i])*cos(y[i]) + cos(x[i])*sin(y[i]));
}
}
__global__ void CombineTwoArrraysKernel(Array A, Array B, Array C) {
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
int id = gridDim.x*blockDim.x*row + col;
C.elements[id] = 1.0f / (sin(A.elements[id])*cos(B.elements[id]) + cos(A.elements[id])*sin(B.elements[id]));
}
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C);
int BLOCK_SIZE = 16;
int main()
{
int n_elements;
srand((unsigned int)time(NULL));
n_elements = MAX_N_ELEMENTS;
Array A, B, C, G;
A.width = B.width = C.width = G.width = 1024;
A.height = B.height = C.height = G.height = MAX_N_ELEMENTS / 1024;
A.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
B.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
C.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
G.elements = (float *)malloc(sizeof(float)*MAX_N_ELEMENTS);
generate_random_float_array(A.elements, MAX_N_ELEMENTS);
generate_random_float_array(B.elements, MAX_N_ELEMENTS);
CHECK_TIME_START;
combine_two_arrays(A.elements, B.elements, C.elements, n_elements);
CHECK_TIME_END(compute_time);
printf("***GPU C[10] = %f/ Time taken = %.6fms\n", C.elements[10], compute_time);
cudaError_t cudaStatus = combine_two_arrays_GPU(A, B, G);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "combine_two_arrays_GPU failed!");
return 1;
}
printf("***GPU G[10] = %f/ Time taken = %.6fms\n", G.elements[10], device_time);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
return 0;
}
cudaError_t combine_two_arrays_GPU(const Array A, const Array B, Array C) {
// Think about how to measure the elapsed time as accurately as possible using the timer functions below.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}///////////// if(cu..... ==CUDA_CALL
Array d_A, d_B, d_C;
size_t size;
d_A.width = A.width; d_A.height = A.height;
size = A.width * A.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_A.elements, size))
CUDA_CALL(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice))
d_B.width = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_B.elements, size))
CUDA_CALL(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice))
d_C.width = C.width; d_C.height = C.height;
size = C.width * C.height * sizeof(float);
CUDA_CALL(cudaMalloc(&d_C.elements, size))
// Assume that width and height are multiples of BLOCK SIZE.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(A.width / dimBlock.x, A.height / dimBlock.y);
CombineTwoArrraysKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
CUDA_CALL(cudaGetLastError())
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
CUDA_CALL(cudaDeviceSynchronize())
CUDA_CALL(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost))
Error:
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
return cudaStatus;
}
#endif
#if prac==2
int n;
#define BLOCK_SIZE 32
#define ELEMENT_SIZE (1<<10)
const int ELEM_PER_VECTOR = 32;
float(*pVecX), (*pVecY), (*pVecY_G);
float(*pMatA);
void init_MatVec(void)
{
srand((unsigned)time(NULL));
FILE* fp = fopen("gen.bin", "rb");
fread(&n, sizeof(int), 1, fp); // n is an int; init_data() writes it with sizeof(int)
pVecX = new float[n * ELEM_PER_VECTOR];
pVecY = new float[n * ELEM_PER_VECTOR];
pVecY_G = new float[n * ELEM_PER_VECTOR];
pMatA = new float[ELEM_PER_VECTOR * ELEM_PER_VECTOR];
fread(pVecX, sizeof(float), n * ELEM_PER_VECTOR, fp);
fread(pMatA, sizeof(float), ELEM_PER_VECTOR * ELEM_PER_VECTOR, fp);
fclose(fp);
}
void Mat_Vec_Multiply()
{
int vec_idx, i, j;
for (vec_idx = 0; vec_idx < ELEMENT_SIZE; vec_idx++) {
for (i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (j = 0; j < ELEM_PER_VECTOR; j++) {
sum += pMatA[i * ELEM_PER_VECTOR + j] * pVecX[vec_idx * ELEM_PER_VECTOR + j];
}
pVecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
}
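// Editor's note: this CPU reference computes Y[v] = A * X[v] for ELEMENT_SIZE
// vectors, with X and Y stored row-major as [ELEMENT_SIZE x ELEM_PER_VECTOR]
// and A as [ELEM_PER_VECTOR x ELEM_PER_VECTOR]; main() later compares the GPU
// result against it element-wise with a 0.001 tolerance.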
__global__ void Mat_Vec_Multiply_Kernel(float *d_VecY, float *d_VecX, float *d_MatA, int Vec_Size)
{
int vec_idx = blockIdx.x * blockDim.x + threadIdx.x ;
for (int i = 0; i < ELEM_PER_VECTOR; i++) {
float sum = 0;
for (int j = 0; j < ELEM_PER_VECTOR; j++) {
sum += d_MatA[i * ELEM_PER_VECTOR + j] * d_VecX[vec_idx * ELEM_PER_VECTOR + j];
}
d_VecY[vec_idx * ELEM_PER_VECTOR + i] = sum;
}
}
void Mat_Vec_Multiply_GPU(float *p_VecX, float *p_MatA, float *p_VecY_G)
{
// Think about how to measure the elapsed time as accurately as possible using the timer functions below.
CHECK_TIME_INIT_GPU()
CHECK_TIME_START_GPU()
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
}///////////// if(cu..... ==CUDA_CALL
float *d_VecX, *d_MatA, *d_VecY_G;
size_t size;
size = ELEM_PER_VECTOR * n * sizeof(float);
CUDA_CALL(cudaMalloc(&d_VecX, size));
CUDA_CALL(cudaMemcpy(d_VecX, p_VecX, size, cudaMemcpyHostToDevice));
CUDA_CALL(cudaMalloc(&d_VecY_G, size));
size = ELEM_PER_VECTOR * ELEM_PER_VECTOR * sizeof(float);
CUDA_CALL(cudaMalloc(&d_MatA, size));
CUDA_CALL(cudaMemcpy(d_MatA, p_MatA, size, cudaMemcpyHostToDevice));
dim3 dimBlock(BLOCK_SIZE);
dim3 dimGrid(n / BLOCK_SIZE);
Mat_Vec_Multiply_Kernel<<<dimGrid, dimBlock>>>(d_VecY_G, d_VecX, d_MatA, ELEM_PER_VECTOR);
CUDA_CALL(cudaMemcpy(p_VecY_G, d_VecY_G, ELEM_PER_VECTOR * n * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CALL(cudaFree(d_VecX));
CUDA_CALL(cudaFree(d_VecY_G));
CUDA_CALL(cudaFree(d_MatA));
CHECK_TIME_END_GPU(device_time)
CHECK_TIME_DEST_GPU()
}
void init_data(int size) {
srand((unsigned)time(NULL));
FILE *fp = fopen("gen.bin", "wb");
fwrite(&size, sizeof(int), 1, fp);
int i, j;
float x;
for (i = 0; i < size; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f*((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
for (i = 0; i < ELEM_PER_VECTOR; i++) {
for (j = 0; j < ELEM_PER_VECTOR; j++) {
x = 2.0f*((float)rand() / RAND_MAX) - 1.0f;
fwrite(&x, sizeof(float), 1, fp);
}
}
fclose(fp);
return;
}
int main()
{
init_data(ELEMENT_SIZE);
init_MatVec();
printf("n = %d file open ok.\n", n);
CHECK_TIME_START;
Mat_Vec_Multiply();
CHECK_TIME_END(compute_time);
printf("***CPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY[0 * ELEM_PER_VECTOR + 0], compute_time);
Mat_Vec_Multiply_GPU(pVecX, pMatA, pVecY_G);
printf("***GPU C[10] = %.3f/ Time taken = %.6fms\n", pVecY_G[0 * ELEM_PER_VECTOR + 0], device_time);
int i;
for (i = 0; i < ELEMENT_SIZE * ELEM_PER_VECTOR; i++) {
if (fabs(pVecY[i] - pVecY_G[i]) > 0.001) {
printf("Kernel execution fail!!\n\n");
break;
}
}
}
#endif
|
8f75bc1a0329d466802b25920adc7e725b5e1c5e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <hip/hip_runtime.h>
#include "nvml_monitor.h"
using std::cout;
using std::generate;
using std::vector;
//Printing takes quite a bit of time. Discount time logging when debugging
#define DEBUG 0
//Error check adopted from https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
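// Usage sketch: wrap every runtime call so a failure reports file and line before
// aborting (illustrative calls only; ptr/bytes are placeholder names):
//   gpuErrchk(hipMalloc(&ptr, bytes));
//   gpuErrchk(hipDeviceSynchronize());   // also surfaces asynchronous kernel errors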
__global__ void Matmul(const int *a, const int *b, int *c, ulong N, ulong M, ulong K) {
ulong row = blockIdx.y * blockDim.y + threadIdx.y;
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Kernel Called...");
if(row < N && col < K){
c[row * K + col] = 0;
for (ulong k = 0; k < M; k++) {
c[row * K + col] += a[row * M + k] * b[k * K + col];
}
}
}
//Adopted from https://github.com/CoffeeBeforeArch/cuda_programming/blob/master/matrixMul/tiled/mmul.cu
#define SHMEM_SIZE 1024
__global__ void tiledMatmul(const int *a, const int *b, int *c, uint M, uint K, uint N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
  for (int i = 0; i < K; i += blockDim.x) {
    // Guard the loads so boundary blocks stay in bounds; out-of-range lanes load 0.
    s_a[threadIdx.y * blockDim.x + threadIdx.x] =
        (row < M && i + threadIdx.x < K) ? a[row * K + i + threadIdx.x] : 0;
    s_b[threadIdx.y * blockDim.x + threadIdx.x] =
        (col < N && i + threadIdx.y < K) ? b[i * N + threadIdx.y * N + col] : 0;
    __syncthreads();
    for (int j = 0; j < blockDim.x; j++) {
      tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
    }
    __syncthreads();
  }
  // Every thread reaches the barriers above; only in-range threads write a result.
  if (row < M && col < N)
    c[row * N + col] = tmp;
}
__global__ void filter_transform(const int *filters, int *resh_filt, ulong k, ulong C, ulong K){
//Each thread is responsible for one column of output
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < K){
for (ulong i = 0; i < k*k*C; i++)
resh_filt[i*K + col] = filters[i + col*k*k*C];
}
}
__global__ void feature_transform(const int* features, int *shards, ulong H, ulong W, ulong C, ulong k){
ulong out_rows = H - k + 1;
ulong out_cols = W - k + 1;
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
ulong row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < out_rows && col < out_cols){
for(ulong ch = 0; ch < C; ch++){
for (ulong u = 0; u < k; u++){
for (ulong v = 0; v < k; v++){
shards[u*k+v + k*k*C*col + k*k*C*out_cols*row + ch*k*k] = features[ch*H*W + (row + u)*W + col+v];
}
}
}
}
}
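// Layout sketch: for output pixel (row, col) the shard row holds k*k*C values,
// channel by channel with the k*k window flattened as u*k+v; with k = 3 the element
// (ch=1, u=2, v=1) of pixel (0, 0) lands at index 1*9 + 2*3 + 1 = 16, so the GEMM
// below consumes one shard row per output pixel.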
void rand_mat(int *a, uint size){
for (uint i = 0; i < size; i++)
a[i] = rand() % 100;
}
//Unused
void print_mat(int *a, int rows, int cols){
printf("\n");
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c+r*cols]);
printf("\n");
}
printf("\n");
}
void print_3d_tensor(int *a, int rows, int cols, int channels){
printf("\n");
for (int ch = 0; ch < channels; ch++){
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c + r*cols + ch*rows*cols]);
printf("\n");
}
printf("\n-----------\n");
}
printf("\n");
}
void print_4d_tensor(int *a, int rows, int cols, int channels, int number){
printf("\n");
for (int num = 0; num < number; num++){
for (int ch = 0; ch < channels; ch++){
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c + r*cols + ch*rows*cols + num*channels*rows*cols]);
printf("\n");
}
printf("\n-----------\n");
}
printf("\n+++++++++++\n");
}
printf("\n");
}
#define TPB 32
int main(){
srand(10); //fixed seed for reproducibility
std::string const fname = {"conv_gemm_shmem_32.csv"};
int dev = 0;
//Instantiate and start nvml tracing thread
NVMLMonThread logger(dev, fname);
ulong k = 3, C = 256, K = 131072;
ulong H = 224, W = 224;
ulong feat_tr_H = (W-k+1)*(H-k+1);
ulong feat_tr_W = k*k*C;
int *kern;
int *feat;
int *kern_tr;
int *feat_tr;
int *mat_res;
std::thread threadStart(&NVMLMonThread::log, &logger);
logger.caller_state = 0;
gpuErrchk(hipMallocManaged(&kern, sizeof(int)*k*k*C*K));
gpuErrchk(hipMallocManaged(&feat, sizeof(int)*H*W*C));
gpuErrchk(hipMallocManaged(&kern_tr, sizeof(int)*k*k*C*K));
gpuErrchk(hipMallocManaged(&feat_tr, sizeof(int)*feat_tr_H*feat_tr_W));
gpuErrchk(hipMallocManaged(&mat_res, sizeof(int)*feat_tr_H*K));
rand_mat(kern, k*k*C*K);
rand_mat(feat, H*W*C);
int THREADS = TPB;
ulong BLOCKS = (K + THREADS - 1)/THREADS;
logger.caller_state = 1; //Calling filter transform kernel state
hipLaunchKernelGGL(( filter_transform), dim3(BLOCKS), dim3(THREADS), 0, 0, kern, kern_tr, k, C, K);
gpuErrchk(hipDeviceSynchronize());
logger.caller_state = 2; //Calling FM transform kernel exec state
#if DEBUG
printf("Printing origin filters\n");
print_4d_tensor(kern, k, k, C, K);
printf("\nPrinting reshaped filters\n");
print_3d_tensor(kern_tr, k*k*C, K, 1);
#endif
//int THREADS_C = W-k+1;
//int THREADS_R = H-k+1;
//dim3 threads(THREADS_R, THREADS_C);
int FTTHREADS = TPB;
dim3 threads(FTTHREADS, FTTHREADS);
ulong CBLOCKS = (W-k+1 + FTTHREADS - 1) / FTTHREADS;
ulong RBLOCKS = (H-k+1 + FTTHREADS - 1) / FTTHREADS;
dim3 blocks(CBLOCKS, RBLOCKS);
hipLaunchKernelGGL(( feature_transform), dim3(blocks), dim3(threads), 0, 0, feat, feat_tr, H, W, C, k);
gpuErrchk(hipDeviceSynchronize());
logger.caller_state = 3; //Calling matmul kernel state
#if DEBUG
printf("\nPrinting original FM\n");
print_3d_tensor(feat, H, W, C);
printf("\nPrinting shards\n");
print_3d_tensor(feat_tr, feat_tr_H, feat_tr_W, 1);
#endif
int THREADS_MUL = TPB;
ulong BLOCKS_R = (feat_tr_H + THREADS_MUL - 1)/THREADS_MUL;
ulong BLOCKS_C = (K + THREADS_MUL - 1)/THREADS_MUL;
dim3 threads_mul(THREADS_MUL, THREADS_MUL);
dim3 blocks_mul(BLOCKS_C, BLOCKS_R);
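// Worked example of the launch shape: H = W = 224 and k = 3 give
// feat_tr_H = 222*222 = 49284, so with TPB = 32 this is a 4096 x 1541 grid of
// 32x32 blocks covering the 49284 x 131072 output matrix.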
hipLaunchKernelGGL(( Matmul), dim3(blocks_mul), dim3(threads_mul), 0, 0, feat_tr, kern_tr, mat_res, feat_tr_H, feat_tr_W, K);
//tiledMatmul<<<blocks_mul, threads_mul>>>(feat_tr, kern_tr, mat_res, feat_tr_H, feat_tr_W, K);
gpuErrchk(hipDeviceSynchronize());
logger.caller_state = 4; //Finished exec state.
std::thread threadKill(&NVMLMonThread::killThread, &logger);
threadStart.join();
threadKill.join();
#if DEBUG
printf("\nPrinting results\n");
print_3d_tensor(mat_res, feat_tr_H, K, 1);
#endif
hipFree(kern);
hipFree(feat);
hipFree(feat_tr);
hipFree(kern_tr);
hipFree(mat_res);
printf("\n Finished... \n");
return 0;
}
/*
uint out_rows = uint((H + 2*padding - k) / stride) + 1;
uint out_cols = uint((W + 2*padding - k) / stride) + 1;
uint temp0 = 0, temp1 = 0;
float *reshaped_filters = new float[k*k*C*K];
float *shards = new float[out_rows*out_cols*k*k*C];
//Reshaping filters from [k,k,C,K] to [k*k*C, K]
for(uint c = 0; c < K; c++){
for(uint r = 0; r < k*k*C; r++){
reshaped_filters[r*K+c] = kernel[r+c*k*k*C];
}
}
//Reshaping activations and collecting shards: Shards shape [out_rows*out_cols, k*k*C] when using SGEMM, otherwise [out_rows, out_cols, k*k*C]
for(uint r = 0; r < out_rows; r++){
for(uint c = 0; c < out_cols; c++){
for(uint ch = 0; ch < C; ch++){
for(uint u = 0; u < k; u++){
for(uint v = 0; v < k; v++){
temp0 = r + u;
temp1 = c + v;
shards[u*k+v + k*k*C*c + k*k*C*out_cols*r + ch*k*k] = data[ch*H*W+(temp0)*W+(temp1)];
}
}
}
}
}
*/
|
8f75bc1a0329d466802b25920adc7e725b5e1c5e.cu
|
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <vector>
#include <cuda.h>
#include "nvml_monitor.h"
using std::cout;
using std::generate;
using std::vector;
//Printing takes quite a bit of time. Discount time logging when debugging
#define DEBUG 0
//Error check adopted from https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__global__ void Matmul(const int *a, const int *b, int *c, ulong N, ulong M, ulong K) {
ulong row = blockIdx.y * blockDim.y + threadIdx.y;
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
//printf("Kernel Called...");
if(row < N && col < K){
c[row * K + col] = 0;
for (ulong k = 0; k < M; k++) {
c[row * K + col] += a[row * M + k] * b[k * K + col];
}
}
}
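// Illustrative host reference (matmul_ref_host is an added helper, not called by main)
// for spot-checking the kernel above on small N, M, K with the same row-major layout.
static void matmul_ref_host(const int *a, const int *b, int *c, ulong N, ulong M, ulong K) {
  for (ulong row = 0; row < N; row++) {
    for (ulong col = 0; col < K; col++) {
      int acc = 0;
      for (ulong m = 0; m < M; m++)
        acc += a[row * M + m] * b[m * K + col];
      c[row * K + col] = acc; // c is N x K, matching the device kernel's indexing
    }
  }
}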
//Adopted from https://github.com/CoffeeBeforeArch/cuda_programming/blob/master/matrixMul/tiled/mmul.cu
#define SHMEM_SIZE 1024
__global__ void tiledMatmul(const int *a, const int *b, int *c, uint M, uint K, uint N) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ int s_a[SHMEM_SIZE];
__shared__ int s_b[SHMEM_SIZE];
int tmp = 0;
  for (int i = 0; i < K; i += blockDim.x) {
    // Guard the loads so boundary blocks stay in bounds; out-of-range lanes load 0.
    s_a[threadIdx.y * blockDim.x + threadIdx.x] =
        (row < M && i + threadIdx.x < K) ? a[row * K + i + threadIdx.x] : 0;
    s_b[threadIdx.y * blockDim.x + threadIdx.x] =
        (col < N && i + threadIdx.y < K) ? b[i * N + threadIdx.y * N + col] : 0;
    __syncthreads();
    for (int j = 0; j < blockDim.x; j++) {
      tmp += s_a[threadIdx.y * blockDim.x + j] * s_b[j * blockDim.x + threadIdx.x];
    }
    __syncthreads();
  }
  // Every thread reaches the barriers above; only in-range threads write a result.
  if (row < M && col < N)
    c[row * N + col] = tmp;
}
__global__ void filter_transform(const int *filters, int *resh_filt, ulong k, ulong C, ulong K){
//Each thread is responsible for one column of output
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
if (col < K){
for (ulong i = 0; i < k*k*C; i++)
resh_filt[i*K + col] = filters[i + col*k*k*C];
}
}
__global__ void feature_transform(const int* features, int *shards, ulong H, ulong W, ulong C, ulong k){
ulong out_rows = H - k + 1;
ulong out_cols = W - k + 1;
ulong col = blockIdx.x * blockDim.x + threadIdx.x;
ulong row = blockIdx.y * blockDim.y + threadIdx.y;
if (row < out_rows && col < out_cols){
for(ulong ch = 0; ch < C; ch++){
for (ulong u = 0; u < k; u++){
for (ulong v = 0; v < k; v++){
shards[u*k+v + k*k*C*col + k*k*C*out_cols*row + ch*k*k] = features[ch*H*W + (row + u)*W + col+v];
}
}
}
}
}
void rand_mat(int *a, uint size){
for (uint i = 0; i < size; i++)
a[i] = rand() % 100;
}
//Unused
void print_mat(int *a, int rows, int cols){
printf("\n");
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c+r*cols]);
printf("\n");
}
printf("\n");
}
void print_3d_tensor(int *a, int rows, int cols, int channels){
printf("\n");
for (int ch = 0; ch < channels; ch++){
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c + r*cols + ch*rows*cols]);
printf("\n");
}
printf("\n-----------\n");
}
printf("\n");
}
void print_4d_tensor(int *a, int rows, int cols, int channels, int number){
printf("\n");
for (int num = 0; num < number; num++){
for (int ch = 0; ch < channels; ch++){
for (int r = 0; r < rows; r++){
for (int c = 0; c < cols; c++)
printf("%d ", a[c + r*cols + ch*rows*cols + num*channels*rows*cols]);
printf("\n");
}
printf("\n-----------\n");
}
printf("\n+++++++++++\n");
}
printf("\n");
}
#define TPB 32
int main(){
srand(10); //fixed seed for reproducibility
std::string const fname = {"conv_gemm_shmem_32.csv"};
int dev = 0;
//Instantiate and start nvml tracing thread
NVMLMonThread logger(dev, fname);
ulong k = 3, C = 256, K = 131072;
ulong H = 224, W = 224;
ulong feat_tr_H = (W-k+1)*(H-k+1);
ulong feat_tr_W = k*k*C;
int *kern;
int *feat;
int *kern_tr;
int *feat_tr;
int *mat_res;
std::thread threadStart(&NVMLMonThread::log, &logger);
logger.caller_state = 0;
gpuErrchk(cudaMallocManaged(&kern, sizeof(int)*k*k*C*K));
gpuErrchk(cudaMallocManaged(&feat, sizeof(int)*H*W*C));
gpuErrchk(cudaMallocManaged(&kern_tr, sizeof(int)*k*k*C*K));
gpuErrchk(cudaMallocManaged(&feat_tr, sizeof(int)*feat_tr_H*feat_tr_W));
gpuErrchk(cudaMallocManaged(&mat_res, sizeof(int)*feat_tr_H*K));
rand_mat(kern, k*k*C*K);
rand_mat(feat, H*W*C);
int THREADS = TPB;
ulong BLOCKS = (K + THREADS - 1)/THREADS;
logger.caller_state = 1; //Calling filter transform kernel state
filter_transform<<<BLOCKS, THREADS>>>(kern, kern_tr, k, C, K);
gpuErrchk(cudaDeviceSynchronize());
logger.caller_state = 2; //Calling FM transform kernel exec state
#if DEBUG
printf("Printing origin filters\n");
print_4d_tensor(kern, k, k, C, K);
printf("\nPrinting reshaped filters\n");
print_3d_tensor(kern_tr, k*k*C, K, 1);
#endif
//int THREADS_C = W-k+1;
//int THREADS_R = H-k+1;
//dim3 threads(THREADS_R, THREADS_C);
int FTTHREADS = TPB;
dim3 threads(FTTHREADS, FTTHREADS);
ulong CBLOCKS = (W-k+1 + FTTHREADS - 1) / FTTHREADS;
ulong RBLOCKS = (H-k+1 + FTTHREADS - 1) / FTTHREADS;
dim3 blocks(CBLOCKS, RBLOCKS);
feature_transform<<<blocks, threads>>>(feat, feat_tr, H, W, C, k);
gpuErrchk(cudaDeviceSynchronize());
logger.caller_state = 3; //Calling matmul kernel state
#if DEBUG
printf("\nPrinting original FM\n");
print_3d_tensor(feat, H, W, C);
printf("\nPrinting shards\n");
print_3d_tensor(feat_tr, feat_tr_H, feat_tr_W, 1);
#endif
int THREADS_MUL = TPB;
ulong BLOCKS_R = (feat_tr_H + THREADS_MUL - 1)/THREADS_MUL;
ulong BLOCKS_C = (K + THREADS_MUL - 1)/THREADS_MUL;
dim3 threads_mul(THREADS_MUL, THREADS_MUL);
dim3 blocks_mul(BLOCKS_C, BLOCKS_R);
Matmul<<<blocks_mul, threads_mul>>>(feat_tr, kern_tr, mat_res, feat_tr_H, feat_tr_W, K);
//tiledMatmul<<<blocks_mul, threads_mul>>>(feat_tr, kern_tr, mat_res, feat_tr_H, feat_tr_W, K);
gpuErrchk(cudaDeviceSynchronize());
logger.caller_state = 4; //Finished exec state.
std::thread threadKill(&NVMLMonThread::killThread, &logger);
threadStart.join();
threadKill.join();
#if DEBUG
printf("\nPrinting results\n");
print_3d_tensor(mat_res, feat_tr_H, K, 1);
#endif
cudaFree(kern);
cudaFree(feat);
cudaFree(feat_tr);
cudaFree(kern_tr);
cudaFree(mat_res);
printf("\n Finished... \n");
return 0;
}
/*
uint out_rows = uint((H + 2*padding - k) / stride) + 1;
uint out_cols = uint((W + 2*padding - k) / stride) + 1;
uint temp0 = 0, temp1 = 0;
float *reshaped_filters = new float[k*k*C*K];
float *shards = new float[out_rows*out_cols*k*k*C];
//Reshaping filters from [k,k,C,K] to [k*k*C, K]
for(uint c = 0; c < K; c++){
for(uint r = 0; r < k*k*C; r++){
reshaped_filters[r*K+c] = kernel[r+c*k*k*C];
}
}
//Reshaping activations and collecting shards: Shards shape [out_rows*out_cols, k*k*C] when using SGEMM, otherwise [out_rows, out_cols, k*k*C]
for(uint r = 0; r < out_rows; r++){
for(uint c = 0; c < out_cols; c++){
for(uint ch = 0; ch < C; ch++){
for(uint u = 0; u < k; u++){
for(uint v = 0; v < k; v++){
temp0 = r + u;
temp1 = c + v;
shards[u*k+v + k*k*C*c + k*k*C*out_cols*r + ch*k*k] = data[ch*H*W+(temp0)*W+(temp1)];
}
}
}
}
}
*/
|
af4aa197a09183a9a9b68e7c303e2859739dfda7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPGuardMasqueradingAsHIP.h>
#include <THH/THH.h>
#include <THH/THHDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
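// Worked example: a = (0,0,9,9) and b = (5,5,14,14) overlap in a 5x5 patch under the
// inclusive (+1) convention, so interS = 25, Sa = Sb = 100 and IoU = 25/175 ~= 0.143.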
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// boxes is an N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
at::hip::HIPGuardMasqueradingAsCUDA device_guard(boxes.device());
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
af4aa197a09183a9a9b68e7c303e2859739dfda7.cu
|
// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAGuard.h>
#include <THC/THC.h>
#include <THC/THCDeviceUtils.cuh>
#include <vector>
#include <iostream>
int const threadsPerBlock = sizeof(unsigned long long) * 8;
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = THCCeilDiv(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
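// Mask layout: dev_mask has boxes_num rows of col_blocks 64-bit words; bit i of
// dev_mask[b * col_blocks + cb] is set when box b and box cb*threadsPerBlock + i
// overlap above the threshold. The host pass below walks boxes in score order and
// uses row b of each kept box to drop the later (lower-scored) overlapping boxes.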
// boxes is an N x 5 tensor
at::Tensor nms_cuda(const at::Tensor boxes, float nms_overlap_thresh) {
using scalar_t = float;
AT_ASSERTM(boxes.type().is_cuda(), "boxes must be a CUDA tensor");
at::cuda::CUDAGuard device_guard(boxes.device());
auto scores = boxes.select(1, 4);
auto order_t = std::get<1>(scores.sort(0, /* descending=*/true));
auto boxes_sorted = boxes.index_select(0, order_t);
int boxes_num = boxes.size(0);
const int col_blocks = THCCeilDiv(boxes_num, threadsPerBlock);
scalar_t* boxes_dev = boxes_sorted.data<scalar_t>();
THCState *state = at::globalContext().lazyInitCUDA(); // TODO replace with getTHCState
unsigned long long* mask_dev = NULL;
//THCudaCheck(THCudaMalloc(state, (void**) &mask_dev,
// boxes_num * col_blocks * sizeof(unsigned long long)));
mask_dev = (unsigned long long*) THCudaMalloc(state, boxes_num * col_blocks * sizeof(unsigned long long));
dim3 blocks(THCCeilDiv(boxes_num, threadsPerBlock),
THCCeilDiv(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
THCudaCheck(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
at::Tensor keep = at::empty({boxes_num}, boxes.options().dtype(at::kLong).device(at::kCPU));
int64_t* keep_out = keep.data<int64_t>();
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep_out[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
THCudaFree(state, mask_dev);
// TODO improve this part
return std::get<0>(order_t.index({
keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep).to(
order_t.device(), keep.scalar_type())
}).sort(0, false));
}
|
205693b3ebf15edb74f8c5392a9bf9a1fdd6127e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Implementation for the generic cuda solution.
* Manages multiple GPUs on a single node using single-GPU implementations
* defined in cuda subdirectories (cuda/core, cuda/generic etc)
*/
#include "cuda_generic.cuh"
#include "core/cuda_core.cuh"
#include "core/dconsts_core.cuh"
#include "core/errorhandler_cuda.cuh"
#include "core/concur_cuda_core.cuh"
#include "generic/collectiveops_cuda_generic.cuh"
#include "generic/rk3_cuda_generic.cuh"
#include "generic/boundcond_cuda_generic.cuh"
#include "generic/slice_cuda_generic.cuh"
/*
* Host configs.
* These contain the information of the whole grid stored in this node.
* (f.ex. the grid dimensions before it has been decomposed for each GPU)
*/
static CParamConfig h_cparams;
static RunConfig h_run_params;
static bool is_initialized;
/*
* Struct for storing the necessary information for running any of the
* single-GPU implementations on some specific GPU.
*
* Contains f.ex. the "local" grid dimensions (d_cparams, dimensions after
* decomposing the host grid) and the starting index (start_idx) in the host
* grid for mapping from local grid to host grid coordinates.
*
* These contexts are stored in a static global array, gpu_contexts.
* If we wanted to integrate on, say, device 1 then we would set the current
* device to (1) and pass the necessary information from gpu_contexts[1] to the
* integration kernel.
*
*/
typedef struct {
vec3i start_idx; //The starting coordinates in the host array (node-wide grid)
CParamConfig d_cparams; //Local CParamConfig for the device (GPU-specific grid)
Grid d_grid, d_grid_dst; //Core arrays
Slice d_slice; //Slice arrays
ReductionArray d_reduct_arr; //Reduction arrays
real* d_halobuffer; //Buffer used for multi-node halo transfers
ConcurContext concur_ctx; //Device-specific streams and events
} GPUContext;
static GPUContext* gpu_contexts;
static int num_devices = -1;
static inline void swap_ptrs(real** a, real** b)
{
real* temp = *a;
*a = *b;
*b = temp;
}
static inline void swap_grid_ptrs(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i)
swap_ptrs(&(d_grid->arr[i]), &(d_grid_dst->arr[i]));
}
static inline hipStream_t get_stream(const int device_id, const StreamName str)
{
return gpu_contexts[device_id].concur_ctx.streams[str];
}
static inline hipEvent_t get_event(const int device_id, const EventName ev)
{
return gpu_contexts[device_id].concur_ctx.events[ev];
}
static void sync_devices()
{
int curr_device;
hipGetDevice(&curr_device);
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
hipDeviceSynchronize();
}
hipSetDevice(curr_device);
}
typedef enum {PEER_FRONT, PEER_BACK, NUM_PEERS} PeerType;
static int get_peer(PeerType pt, int device_id)
{
switch (pt) {
case PEER_FRONT:
return (device_id+1) % num_devices;
case PEER_BACK:
return (num_devices+device_id-1) % num_devices;
default:
CRASH("Invalid PeerType");
}
}
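//Example: with num_devices = 4, device 1 gets PEER_FRONT = 2 and PEER_BACK = 0,
//and device 3 wraps around to PEER_FRONT = 0, so the devices form a ring along z.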
//TODO NOTE: peer access not supported between 4x p100, why?
//TEMP FIX: Commented out peer access enabling, now runs out-of-the-box
//on p100. Surprisingly peer access seems to work even without explicitly
//enabling it also on k80s.
static void set_peer_access(int device_id, bool enable_access)
{
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
/*
if (device_id != peer_front) {
if (enable_access)
hipDeviceEnablePeerAccess(peer_front, 0);
else
hipDeviceDisablePeerAccess(peer_front);
}
if (device_id != peer_back && peer_front != peer_back) {
if (enable_access)
hipDeviceEnablePeerAccess(peer_back, 0);
else
hipDeviceDisablePeerAccess(peer_back);
}*/
}
/*
* Handles the allocation and initialization of the memories of all GPUs on
* the node (incl. constant memory).
*/
__global__ void dummy_kernel() {}
void init_cuda_generic(CParamConfig* cparamconf, RunConfig* runconf)
{
if (is_initialized) { CRASH("cuda_generic already initialized!") }
hipGetDeviceCount(&num_devices);
gpu_contexts = (GPUContext*) malloc(sizeof(GPUContext)*num_devices);
printf("Using %d devices\n", num_devices);
print_gpu_config_cuda_core();
//Copy the structs in case the caller deallocates them prematurely
h_cparams = *cparamconf;
h_run_params = *runconf;
//#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//printf("%d\n", __CUDA_ARCH__);
printf("Trying to run a dummy kernel. If this fails, make sure that your\n"
"device supports the CUDA architecture you are compiling for.\n"
"Running dummy kernel... "); fflush(stdout);
hipLaunchKernelGGL(( dummy_kernel), dim3(1), dim3(1), 0, 0, );
CUDA_ERRCHK_KERNEL_ALWAYS();
printf("Success!\n");
//Enable peer access
set_peer_access(device_id, true);
//Decompose the problem
ctx->d_cparams = h_cparams;
ctx->d_cparams.nz = h_cparams.nz / num_devices; //Slice the z axis
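//Note: integer division, so this decomposition assumes h_cparams.nz is divisible
//by num_devices; any remainder z-slabs would be silently dropped.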
ctx->d_cparams.compute_missing_values(); //Quick hack
ctx->d_cparams.dsx = h_cparams.dsx;
ctx->d_cparams.dsy = h_cparams.dsy;
ctx->d_cparams.dsz = h_cparams.dsz;
ctx->d_cparams.dsmin = h_cparams.dsmin;
ctx->start_idx = (vec3i){0, 0, device_id * ctx->d_cparams.nz};
printf("%d and start %d\n", ctx->d_cparams.nz, ctx->start_idx.z);
//Allocate and init memory on the GPU
load_hydro_dconsts_cuda_core(&ctx->d_cparams, &h_run_params, ctx->start_idx);
init_grid_cuda_core(&ctx->d_grid, &ctx->d_grid_dst, &ctx->d_cparams);
init_slice_cuda_generic(&ctx->d_slice, &ctx->d_cparams, &h_run_params);
init_reduction_array_cuda_generic(&ctx->d_reduct_arr, &ctx->d_cparams);
init_halo_cuda_core(ctx->d_halobuffer); //Note: Called even without multi-node
init_concur_ctx(&ctx->concur_ctx);
}
is_initialized = true;
}
/*
* Deallocates all memory on the GPU
*/
void destroy_cuda_generic()
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
//Sync all previous operations
sync_devices();
//Destroy everything
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Disable peer access
set_peer_access(device_id, false);
destroy_slice_cuda_generic(&ctx->d_slice);
destroy_reduction_array_cuda_generic(&ctx->d_reduct_arr);
destroy_grid_cuda_core(&ctx->d_grid, &ctx->d_grid_dst);
destroy_halo_cuda_core(ctx->d_halobuffer);
destroy_concur_ctx(&ctx->concur_ctx);
}
//Belt-and-suspenders-destroy-everything
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
hipDeviceReset();
}
free(gpu_contexts);
is_initialized = false;
}
void load_grid_cuda_generic(Grid* h_grid)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
//If we wanted to use another layout, we would do it here instead of using the core interface
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
load_grid_cuda_core(&ctx->d_grid, &ctx->d_cparams, &ctx->start_idx, h_grid, &h_cparams);
}
}
void store_grid_cuda_generic(Grid* h_grid)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
store_grid_cuda_core(h_grid, &h_cparams, &ctx->d_grid, &ctx->d_cparams, &ctx->start_idx);
}
}
static void local_boundconds_cuda_generic()
{
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Do local boundaries and signal when done
periodic_xy_boundconds_cuda_generic(&ctx->d_grid, &ctx->d_cparams, 0);
const hipEvent_t local_bc_done = get_event(device_id, EVENT_LOCAL_BC_DONE);
hipEventRecord(local_bc_done, 0);//Implicit synchronization with the default stream
}
}
static void fetch_halos_cuda_generic(GPUContext* ctx, const int device_id, hipStream_t stream=0)
{
const int front_id = get_peer(PEER_FRONT, device_id);
const int back_id = get_peer(PEER_BACK, device_id);
const size_t slab_size = ctx->d_cparams.mx * ctx->d_cparams.my;
const size_t transfer_size_bytes = BOUND_SIZE * slab_size * sizeof(real);
const size_t z_src0 = ctx->d_cparams.nz * slab_size;
const size_t z_dst0 = 0;
const size_t z_src1 = BOUND_SIZE * slab_size;
const size_t z_dst1 = (ctx->d_cparams.nz + BOUND_SIZE) * slab_size;
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( hipMemcpyPeerAsync(&ctx->d_grid.arr[w][z_dst0], device_id,
&gpu_contexts[back_id].d_grid.arr[w][z_src0], back_id,
transfer_size_bytes, stream) ); //Back
CUDA_ERRCHK( hipMemcpyPeerAsync(&ctx->d_grid.arr[w][z_dst1], device_id,
&gpu_contexts[front_id].d_grid.arr[w][z_src1], front_id,
transfer_size_bytes, stream) ); //Front
}
}
static void exchange_halos_cuda_generic()
{
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Wait until front and back neighbors are done with local boundary conditions
const hipStream_t global_stream = get_stream(device_id, STREAM_GLOBAL);
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
hipStreamWaitEvent(global_stream, get_event(peer_front, EVENT_LOCAL_BC_DONE), 0);
hipStreamWaitEvent(global_stream, get_event(peer_back, EVENT_LOCAL_BC_DONE), 0);
//Get the updated halos from the front and back neighbor
fetch_halos_cuda_generic(ctx, device_id, global_stream);
}
}
void boundcond_step_cuda_generic()
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
local_boundconds_cuda_generic();
exchange_halos_cuda_generic();
}
void integrate_step_cuda_generic(int isubstep, real dt)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
//For all GPUs in the node in parallel
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Integrate
rk3_inner_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_LOCAL_HYDRO],
ctx->concur_ctx.streams[STREAM_LOCAL_INDUCT]);
//WARNING: boundcond_step must have been called before rk3_outer.
//If fetch_halos_cuda_generic() has not already been scheduled for execution
//on the GPU, then the execution order will be wrong
rk3_outer_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_GLOBAL]);
//Swap src and dst device array pointers
swap_grid_ptrs(&ctx->d_grid, &ctx->d_grid_dst);
}
//WARNING: this sync is not absolutely necessary but left here for safety:
//without sync the host caller is able to execute other (potentially dangerous)
//code in parallel with the GPU integration/memory transfers
sync_devices(); //WARNING
}
void integrate_cuda_generic(real dt)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
for (int isubstep=0; isubstep < 3; ++isubstep) {
boundcond_step_cuda_generic();
integrate_step_cuda_generic(isubstep, dt);
//The original concurrency code, left here since it's easier to read
//when boundary conditions and integration are not split up into separate
//functions
/*
//Local boundaries and integration in the inner domain
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Do local boundaries and signal when done
periodic_xy_boundconds_cuda_generic(&ctx->d_grid, &ctx->d_cparams, 0);
const hipEvent_t local_bc_done = get_event(device_id, EVENT_LOCAL_BC_DONE);
hipEventRecord(local_bc_done, 0);//Implicit synchronization with the default stream
//Start integrating in the inner computational domain
rk3_inner_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_LOCAL_HYDRO],
ctx->concur_ctx.streams[STREAM_LOCAL_INDUCT]);
}
//Communication of the outer halos among devices
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Wait until front and back neighbors are done with local boundary conditions
const hipStream_t global_stream = get_stream(device_id, STREAM_GLOBAL);
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
hipStreamWaitEvent(global_stream, get_event(peer_front, EVENT_LOCAL_BC_DONE), 0);
hipStreamWaitEvent(global_stream, get_event(peer_back, EVENT_LOCAL_BC_DONE), 0);
//Get the updated halos from the front and back neighbor
fetch_halos_cuda_generic(ctx, device_id, global_stream);
}
//Integrate in the outer computational domain
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Start integrating the outer domain after the updated halos
//have arrived from neighbors
rk3_outer_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_GLOBAL]);
//We're done, swap src and dst device array pointers
swap_grid_ptrs(&ctx->d_grid, &ctx->d_grid_dst);
}*/
}
}
#include "utils/utils.h" //For max/min/sum
real reduce_cuda_generic(ReductType t, GridType grid_type)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
real* res = (real*) malloc(sizeof(real)*num_devices);
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
if (t == MAX_VEC_UU || t == MIN_VEC_UU || t == RMS_VEC_UU) {
if (grid_type != NOT_APPLICABLE) {
printf("Note: other than NOT_APPLICABLE passed to reduce_cuda_generic as ArrType."
"This has no effect when a vector ReductType is selected\n");
}
res[device_id] = get_reduction_cuda_generic(&ctx->d_reduct_arr, t, &ctx->d_cparams,
ctx->d_grid.arr[UUX], ctx->d_grid.arr[UUY], ctx->d_grid.arr[UUZ]);
} else {
if (grid_type == NOT_APPLICABLE) { CRASH("Invalid GridType in reduce_cuda_generic"); }
res[device_id] = get_reduction_cuda_generic(&ctx->d_reduct_arr, t, &ctx->d_cparams, ctx->d_grid.arr[grid_type]);
}
}
//Bruteforce: find max, min or rms from the gpu results
////#pragma omp parallel target teams distribute parallel for reduction(+:r)//TODO
for (int i=1; i < num_devices; ++i) {
if (t == MAX_VEC_UU || t == MAX_SCAL)
res[0] = max(res[0], res[i]);
else if (t == MIN_VEC_UU || t == MIN_SCAL)
res[0] = min(res[0], res[i]);
else if (t == RMS_VEC_UU || t == RMS_SCAL || t == RMS_EXP)
res[0] = sum(res[0], res[i]);
else
CRASH("Unexpected ReductType in reduce_cuda_generic()");
}
if (t == RMS_VEC_UU || t == RMS_SCAL || t == RMS_EXP)
res[0] = sqrt(res[0] * 1.0 / (h_cparams.nx*h_cparams.ny*h_cparams.nz));//TODO note, not correct for non-equidistant grids
const real retval = res[0];
free(res);
return retval;
}
void get_slice_cuda_generic(Slice* h_slice)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
update_slice_cuda_generic(&ctx->d_slice, &ctx->d_grid, &ctx->d_cparams, &h_run_params);
hipDeviceSynchronize();
store_slice_cuda_core(h_slice, &h_cparams, &h_run_params, &ctx->d_slice, &ctx->d_cparams, &ctx->start_idx);
}
//cd src/build/ && make -j && ac_srun_taito_multigpu 4 && cd ../../ && screen py_animate_data --nslices=100
}
void load_forcing_params_cuda_generic(ForcingParams* forcing_params)
{
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
load_forcing_dconsts_cuda_core(forcing_params);
}
}
void load_outer_halos_cuda_generic(Grid* h_grid, real* h_halobuffer)
{
////#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
load_outer_halo_cuda_core(&ctx->d_grid, ctx->d_halobuffer, &ctx->d_cparams,
h_grid, h_halobuffer, &h_cparams, &ctx->start_idx);
}
}
void store_internal_halos_cuda_generic(Grid* h_grid, real* h_halobuffer)
{
////#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
hipSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
store_internal_halo_cuda_core(h_grid, h_halobuffer, &h_cparams, &ctx->start_idx,
&ctx->d_grid, ctx->d_halobuffer, &ctx->d_cparams);
}
}
|
205693b3ebf15edb74f8c5392a9bf9a1fdd6127e.cu
|
/*
* Implementation for the generic cuda solution.
* Manages multiple GPUs on a single node using single-GPU implementations
* defined in cuda subdirectories (cuda/core, cuda/generic etc)
*/
#include "cuda_generic.cuh"
#include "core/cuda_core.cuh"
#include "core/dconsts_core.cuh"
#include "core/errorhandler_cuda.cuh"
#include "core/concur_cuda_core.cuh"
#include "generic/collectiveops_cuda_generic.cuh"
#include "generic/rk3_cuda_generic.cuh"
#include "generic/boundcond_cuda_generic.cuh"
#include "generic/slice_cuda_generic.cuh"
/*
* Host configs.
* These contain the information of the whole grid stored in this node.
* (f.ex. the grid dimensions before it has been decomposed for each GPU)
*/
static CParamConfig h_cparams;
static RunConfig h_run_params;
static bool is_initialized;
/*
* Struct for storing the necessary information for running any of the
* single-GPU implementations on some specific GPU.
*
* Contains f.ex. the "local" grid dimensions (d_cparams, dimensions after
* decomposing the host grid) and the starting index (start_idx) in the host
* grid for mapping from local grid to host grid coordinates.
*
* These contexts are stored in a static global array, gpu_contexts.
* If we wanted to integrate on, say, device 1 then we would set the current
* device to (1) and pass the necessary information from gpu_contexts[1] to the
* integration kernel.
*
*/
typedef struct {
vec3i start_idx; //The starting coordinates in the host array (node-wide grid)
CParamConfig d_cparams; //Local CParamConfig for the device (GPU-specific grid)
Grid d_grid, d_grid_dst; //Core arrays
Slice d_slice; //Slice arrays
ReductionArray d_reduct_arr; //Reduction arrays
real* d_halobuffer; //Buffer used for multi-node halo transfers
ConcurContext concur_ctx; //Device-specific streams and events
} GPUContext;
static GPUContext* gpu_contexts;
static int num_devices = -1;
static inline void swap_ptrs(real** a, real** b)
{
real* temp = *a;
*a = *b;
*b = temp;
}
static inline void swap_grid_ptrs(Grid* d_grid, Grid* d_grid_dst)
{
for (int i=0; i < NUM_ARRS; ++i)
swap_ptrs(&(d_grid->arr[i]), &(d_grid_dst->arr[i]));
}
static inline cudaStream_t get_stream(const int device_id, const StreamName str)
{
return gpu_contexts[device_id].concur_ctx.streams[str];
}
static inline cudaEvent_t get_event(const int device_id, const EventName ev)
{
return gpu_contexts[device_id].concur_ctx.events[ev];
}
static void sync_devices()
{
int curr_device;
cudaGetDevice(&curr_device);
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
cudaDeviceSynchronize();
}
cudaSetDevice(curr_device);
}
typedef enum {PEER_FRONT, PEER_BACK, NUM_PEERS} PeerType;
static int get_peer(PeerType pt, int device_id)
{
switch (pt) {
case PEER_FRONT:
return (device_id+1) % num_devices;
case PEER_BACK:
return (num_devices+device_id-1) % num_devices;
default:
CRASH("Invalid PeerType");
}
}
//TODO NOTE: peer access not supported between 4x p100, why?
//TEMP FIX: Commented out peer access enabling, now runs out-of-the-box
//on p100. Surprisingly peer access seems to work even without explicitly
//enabling it also on k80s.
static void set_peer_access(int device_id, bool enable_access)
{
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
/*
if (device_id != peer_front) {
if (enable_access)
cudaDeviceEnablePeerAccess(peer_front, 0);
else
cudaDeviceDisablePeerAccess(peer_front);
}
if (device_id != peer_back && peer_front != peer_back) {
if (enable_access)
cudaDeviceEnablePeerAccess(peer_back, 0);
else
cudaDeviceDisablePeerAccess(peer_back);
}*/
}
/*
* Handles the allocation and initialization of the memories of all GPUs on
* the node (incl. constant memory).
*/
__global__ void dummy_kernel() {}
void init_cuda_generic(CParamConfig* cparamconf, RunConfig* runconf)
{
if (is_initialized) { CRASH("cuda_generic already initialized!") }
cudaGetDeviceCount(&num_devices);
gpu_contexts = (GPUContext*) malloc(sizeof(GPUContext)*num_devices);
printf("Using %d devices\n", num_devices);
print_gpu_config_cuda_core();
//Copy the structs in case the caller deallocates them prematurely
h_cparams = *cparamconf;
h_run_params = *runconf;
//#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//printf("%d\n", __CUDA_ARCH__);
printf("Trying to run a dummy kernel. If this fails, make sure that your\n"
"device supports the CUDA architecture you are compiling for.\n"
"Running dummy kernel... "); fflush(stdout);
dummy_kernel<<<1, 1>>>();
CUDA_ERRCHK_KERNEL_ALWAYS();
printf("Success!\n");
//Enable peer access
set_peer_access(device_id, true);
//Decompose the problem
ctx->d_cparams = h_cparams;
ctx->d_cparams.nz = h_cparams.nz / num_devices; //Slice the z axis
ctx->d_cparams.compute_missing_values(); //Quick hack
ctx->d_cparams.dsx = h_cparams.dsx;
ctx->d_cparams.dsy = h_cparams.dsy;
ctx->d_cparams.dsz = h_cparams.dsz;
ctx->d_cparams.dsmin = h_cparams.dsmin;
ctx->start_idx = (vec3i){0, 0, device_id * ctx->d_cparams.nz};
printf("%d and start %d\n", ctx->d_cparams.nz, ctx->start_idx.z);
//Allocate and init memory on the GPU
load_hydro_dconsts_cuda_core(&ctx->d_cparams, &h_run_params, ctx->start_idx);
init_grid_cuda_core(&ctx->d_grid, &ctx->d_grid_dst, &ctx->d_cparams);
init_slice_cuda_generic(&ctx->d_slice, &ctx->d_cparams, &h_run_params);
init_reduction_array_cuda_generic(&ctx->d_reduct_arr, &ctx->d_cparams);
init_halo_cuda_core(ctx->d_halobuffer); //Note: Called even without multi-node
init_concur_ctx(&ctx->concur_ctx);
}
is_initialized = true;
}
/*
* Deallocates all memory on the GPU
*/
void destroy_cuda_generic()
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
//Sync all previous operations
sync_devices();
//Destroy everything
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Disable peer access
set_peer_access(device_id, false);
destroy_slice_cuda_generic(&ctx->d_slice);
destroy_reduction_array_cuda_generic(&ctx->d_reduct_arr);
destroy_grid_cuda_core(&ctx->d_grid, &ctx->d_grid_dst);
destroy_halo_cuda_core(ctx->d_halobuffer);
destroy_concur_ctx(&ctx->concur_ctx);
}
//Belt-and-suspenders-destroy-everything
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
cudaDeviceReset();
}
free(gpu_contexts);
is_initialized = false;
}
void load_grid_cuda_generic(Grid* h_grid)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
//If we wanted to use another layout, we would do it here instead of using the core interface
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
load_grid_cuda_core(&ctx->d_grid, &ctx->d_cparams, &ctx->start_idx, h_grid, &h_cparams);
}
}
void store_grid_cuda_generic(Grid* h_grid)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
store_grid_cuda_core(h_grid, &h_cparams, &ctx->d_grid, &ctx->d_cparams, &ctx->start_idx);
}
}
static void local_boundconds_cuda_generic()
{
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Do local boundaries and signal when done
periodic_xy_boundconds_cuda_generic(&ctx->d_grid, &ctx->d_cparams, 0);
const cudaEvent_t local_bc_done = get_event(device_id, EVENT_LOCAL_BC_DONE);
cudaEventRecord(local_bc_done, 0);//Implicit synchronization with the default stream
}
}
static void fetch_halos_cuda_generic(GPUContext* ctx, const int device_id, cudaStream_t stream=0)
{
const int front_id = get_peer(PEER_FRONT, device_id);
const int back_id = get_peer(PEER_BACK, device_id);
const size_t slab_size = ctx->d_cparams.mx * ctx->d_cparams.my;
const size_t transfer_size_bytes = BOUND_SIZE * slab_size * sizeof(real);
const size_t z_src0 = ctx->d_cparams.nz * slab_size;
const size_t z_dst0 = 0;
const size_t z_src1 = BOUND_SIZE * slab_size;
const size_t z_dst1 = (ctx->d_cparams.nz + BOUND_SIZE) * slab_size;
for (int w=0; w < NUM_ARRS; ++w) {
CUDA_ERRCHK( cudaMemcpyPeerAsync(&ctx->d_grid.arr[w][z_dst0], device_id,
&gpu_contexts[back_id].d_grid.arr[w][z_src0], back_id,
transfer_size_bytes, stream) ); //Back
CUDA_ERRCHK( cudaMemcpyPeerAsync(&ctx->d_grid.arr[w][z_dst1], device_id,
&gpu_contexts[front_id].d_grid.arr[w][z_src1], front_id,
transfer_size_bytes, stream) ); //Front
}
}
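// Index sketch: each device stores BOUND_SIZE ghost xy-slabs below and above its nz
// interior slabs. z_src0/z_dst0 copy the back neighbor's last BOUND_SIZE interior
// slabs into the local bottom ghost zone, and z_src1/z_dst1 copy the front neighbor's
// first BOUND_SIZE interior slabs into the local top ghost zone.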
static void exchange_halos_cuda_generic()
{
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Wait until front and back neighbors are done with local boundary conditions
const cudaStream_t global_stream = get_stream(device_id, STREAM_GLOBAL);
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
cudaStreamWaitEvent(global_stream, get_event(peer_front, EVENT_LOCAL_BC_DONE), 0);
cudaStreamWaitEvent(global_stream, get_event(peer_back, EVENT_LOCAL_BC_DONE), 0);
//Get the updated halos from the front and back neighbor
fetch_halos_cuda_generic(ctx, device_id, global_stream);
}
}
void boundcond_step_cuda_generic()
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
local_boundconds_cuda_generic();
exchange_halos_cuda_generic();
}
void integrate_step_cuda_generic(int isubstep, real dt)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
//For all GPUs in the node in parallel
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Integrate
rk3_inner_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_LOCAL_HYDRO],
ctx->concur_ctx.streams[STREAM_LOCAL_INDUCT]);
//WARNING: boundcond_step must have been called before rk3_outer.
//If fetch_halos_cuda_generic() has not already been scheduled for execution
//on the GPU, then the execution order will be wrong
rk3_outer_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_GLOBAL]);
//Swap src and dst device array pointers
swap_grid_ptrs(&ctx->d_grid, &ctx->d_grid_dst);
}
//WARNING: this sync is not absolutely necessary but left here for safety:
//without sync the host caller is able to execute other (potentially dangerous)
//code in parallel with the GPU integration/memory transfers
sync_devices(); //WARNING
}
void integrate_cuda_generic(real dt)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!") }
for (int isubstep=0; isubstep < 3; ++isubstep) {
boundcond_step_cuda_generic();
integrate_step_cuda_generic(isubstep, dt);
//The original concurrency code, left here since it's easier to read
//when boundary conditions and integration are not split up into separate
//functions
/*
//Local boundaries and integration in the inner domain
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Do local boundaries and signal when done
periodic_xy_boundconds_cuda_generic(&ctx->d_grid, &ctx->d_cparams, 0);
const cudaEvent_t local_bc_done = get_event(device_id, EVENT_LOCAL_BC_DONE);
cudaEventRecord(local_bc_done, 0);//Implicit synchronization with the default stream
//Start integrating in the inner computational domain
rk3_inner_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_LOCAL_HYDRO],
ctx->concur_ctx.streams[STREAM_LOCAL_INDUCT]);
}
//Communication of the outer halos among devices
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Wait until front and back neighbors are done with local boundary conditions
const cudaStream_t global_stream = get_stream(device_id, STREAM_GLOBAL);
const int peer_front = get_peer(PEER_FRONT, device_id);
const int peer_back = get_peer(PEER_BACK, device_id);
cudaStreamWaitEvent(global_stream, get_event(peer_front, EVENT_LOCAL_BC_DONE), 0);
cudaStreamWaitEvent(global_stream, get_event(peer_back, EVENT_LOCAL_BC_DONE), 0);
//Get the updated halos from the front and back neighbor
fetch_halos_cuda_generic(ctx, device_id, global_stream);
}
//Integrate in the outer computational domain
#pragma omp parallel for num_threads(num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
//Start integrating the outer domain after the updated halos
//have arrived from neighbors
rk3_outer_cuda_generic(&ctx->d_grid, &ctx->d_grid_dst, isubstep, dt,
&ctx->d_cparams,
ctx->concur_ctx.streams[STREAM_GLOBAL]);
//We're done, swap src and dst device array pointers
swap_grid_ptrs(&ctx->d_grid, &ctx->d_grid_dst);
}*/
}
}
#include "utils/utils.h" //For max/min/sum
real reduce_cuda_generic(ReductType t, GridType grid_type)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
real* res = (real*) malloc(sizeof(real)*num_devices);
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
if (t == MAX_VEC_UU || t == MIN_VEC_UU || t == RMS_VEC_UU) {
if (grid_type != NOT_APPLICABLE) {
printf("Note: other than NOT_APPLICABLE passed to reduce_cuda_generic as ArrType."
"This has no effect when a vector ReductType is selected\n");
}
res[device_id] = get_reduction_cuda_generic(&ctx->d_reduct_arr, t, &ctx->d_cparams,
ctx->d_grid.arr[UUX], ctx->d_grid.arr[UUY], ctx->d_grid.arr[UUZ]);
} else {
if (grid_type == NOT_APPLICABLE) { CRASH("Invalid GridType in reduce_cuda_generic"); }
res[device_id] = get_reduction_cuda_generic(&ctx->d_reduct_arr, t, &ctx->d_cparams, ctx->d_grid.arr[grid_type]);
}
}
//Bruteforce: find max, min or rms from the gpu results
////#pragma omp parallel target teams distribute parallel for reduction(+:r)//TODO
for (int i=1; i < num_devices; ++i) {
if (t == MAX_VEC_UU || t == MAX_SCAL)
res[0] = max(res[0], res[i]);
else if (t == MIN_VEC_UU || t == MIN_SCAL)
res[0] = min(res[0], res[i]);
else if (t == RMS_VEC_UU || t == RMS_SCAL || t == RMS_EXP)
res[0] = sum(res[0], res[i]);
else
CRASH("Unexpected ReductType in reduce_cuda_generic()");
}
if (t == RMS_VEC_UU || t == RMS_SCAL || t == RMS_EXP)
res[0] = sqrt(res[0] * 1.0 / (h_cparams.nx*h_cparams.ny*h_cparams.nz));//TODO note, not correct for non-equidistant grids
const real retval = res[0];
free(res);
return retval;
}
void get_slice_cuda_generic(Slice* h_slice)
{
if (!is_initialized) { CRASH("cuda_generic wasn't initialized!"); }
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
update_slice_cuda_generic(&ctx->d_slice, &ctx->d_grid, &ctx->d_cparams, &h_run_params);
cudaDeviceSynchronize();
store_slice_cuda_core(h_slice, &h_cparams, &h_run_params, &ctx->d_slice, &ctx->d_cparams, &ctx->start_idx);
}
//cd src/build/ && make -j && ac_srun_taito_multigpu 4 && cd ../../ && screen py_animate_data --nslices=100
}
void load_forcing_params_cuda_generic(ForcingParams* forcing_params)
{
#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
load_forcing_dconsts_cuda_core(forcing_params);
}
}
void load_outer_halos_cuda_generic(Grid* h_grid, real* h_halobuffer)
{
////#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
load_outer_halo_cuda_core(&ctx->d_grid, ctx->d_halobuffer, &ctx->d_cparams,
h_grid, h_halobuffer, &h_cparams, &ctx->start_idx);
}
}
void store_internal_halos_cuda_generic(Grid* h_grid, real* h_halobuffer)
{
////#pragma omp parallel for num_threads (num_devices)
for (int device_id=0; device_id < num_devices; ++device_id) {
cudaSetDevice(device_id);
GPUContext* ctx = &gpu_contexts[device_id];
store_internal_halo_cuda_core(h_grid, h_halobuffer, &h_cparams, &ctx->start_idx,
&ctx->d_grid, ctx->d_halobuffer, &ctx->d_cparams);
}
}
|
93dbf50268d593832b78a5080b715c3b2e30c03f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <malloc.h>
#include <math.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <helper_cuda.h>
#include "GLCMCalculationGPU.cuh"
__global__ void GLCMNormalize_kernel(float* glcm, const int R)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
glcm[id] /= R;
}
void GLCMgpu_NormalizeGLCM(GLCMInfo &gi)
{
hipLaunchKernelGGL(( GLCMNormalize_kernel), dim3(gi.depth), dim3(gi.depth), 0, 0, gi.d_glcm, gi.R);
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
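/* Illustrative arithmetic (assuming gi.depth is the number of gray levels): the launch above
   uses gi.depth blocks of gi.depth threads, i.e. one thread per GLCM cell, so for depth = 64
   it touches 64 * 64 = 4096 entries and divides each one by the normalisation constant R
   (presumably the number of counted pixel pairs). */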
__global__ void GLCMPerGrid_kernel(const unsigned char* intensity, float* glcm, const int rows, const int cols,
const int depth, const int xmin, const int xmax, const int ymin, const int copixel)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int iid = x + y * cols;
if (x >= xmin && x < xmax && y >= ymin && y < rows)
{
unsigned char i = intensity[iid];
unsigned char j = intensity[iid + copixel];
atomicAdd(&(glcm[i * depth + j]), 1);
atomicAdd(&(glcm[j * depth + i]), 1);
}
}
void GLCMPerGrid(GLCMInfo &gi, float* time)
{
const int K = 16;
int xmin = 0, xmax = 0, ymin = 0, copixel = 0;
if (gi.angle == 0) { xmin = 0; xmax = gi.cols - gi.distance; ymin = 0; copixel = 1; }
else if (gi.angle == 45) { xmin = 0; xmax = gi.cols - gi.distance; ymin = gi.distance; copixel = 1 - gi.cols; }
else if (gi.angle == 90) { xmin = 0; xmax = gi.cols; ymin = gi.distance; copixel = -(gi.cols); }
else if (gi.angle == 135) { xmin = gi.distance; xmax = gi.cols; ymin = gi.distance; copixel = -1 - gi.cols; }
else {} // invalid angle -- handle?
dim3 blocks((gi.cols + K - 1) / K, (gi.rows + K - 1) / K);
dim3 threads(K, K);
hipEvent_t start = 0, stop = 0;
checkCudaErrors(hipEventCreate(&start, 0));
checkCudaErrors(hipEventCreate(&stop, 0));
checkCudaErrors(hipEventRecord(start, 0));
hipLaunchKernelGGL(( GLCMPerGrid_kernel), dim3(blocks), dim3(threads), 0, 0, gi.d_intensity, gi.d_glcm, gi.rows, gi.cols, gi.depth, xmin, xmax, ymin, copixel);
checkCudaErrors(hipEventRecord(stop, 0));
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventElapsedTime(time, start, stop));
hipEventDestroy(start);
hipEventDestroy(stop);
}
__global__ void GLCMPerBlock_kernel(const unsigned char* intensity, float* glcm, const int rows, const int cols,
const int depth, const int xmin, const int xmax, const int ymin, const int copixel)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int iid = x + y * cols;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int blocksize = blockDim.x * blockDim.y;
int glcmsize = depth * depth;
extern __shared__ int glcmshared[];
int k = tid;
while (k < glcmsize)
{
glcmshared[k] = 0;
k += blocksize;
}
__syncthreads();
if (x >= xmin && x < xmax && y >= ymin && y < rows)
{
unsigned char i = intensity[iid];
unsigned char j = intensity[iid + copixel];
atomicAdd(&(glcmshared[i * depth + j]), 1);
atomicAdd(&(glcmshared[j * depth + i]), 1);
}
__syncthreads();
k = tid;
while (k < glcmsize)
{
atomicAdd(&(glcm[k]), glcmshared[k]);
k += blocksize;
}
}
void GLCMPerBlock(GLCMInfo &gi, float* time)
{
const int glcmsize = gi.depth * gi.depth * sizeof(float);
const int K = 16;
int xmin = 0, xmax = 0, ymin = 0, copixel = 0;
if (gi.angle == 0) { xmin = 0; xmax = gi.cols - gi.distance; ymin = 0; copixel = 1; }
else if (gi.angle == 45) { xmin = 0; xmax = gi.cols - gi.distance; ymin = gi.distance; copixel = 1 - gi.cols; }
else if (gi.angle == 90) { xmin = 0; xmax = gi.cols; ymin = gi.distance; copixel = -(gi.cols); }
else if (gi.angle == 135) { xmin = gi.distance; xmax = gi.cols; ymin = gi.distance; copixel = -1 - gi.cols; }
else {} // invalid angle -- handle?
dim3 blocks((gi.cols + K - 1) / K, (gi.rows + K - 1) / K);
dim3 threads(K, K);
hipEvent_t start = 0, stop = 0;
checkCudaErrors(hipEventCreate(&start, 0));
checkCudaErrors(hipEventCreate(&stop, 0));
checkCudaErrors(hipEventRecord(start, 0));
hipLaunchKernelGGL(( GLCMPerBlock_kernel), dim3(blocks), dim3(threads), glcmsize, 0, gi.d_intensity, gi.d_glcm, gi.rows, gi.cols, gi.depth, xmin, xmax, ymin, copixel);
checkCudaErrors(hipEventRecord(stop, 0));
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipEventElapsedTime(time, start, stop));
hipEventDestroy(start);
hipEventDestroy(stop);
}
void GLCMgpu_CalculateGLCM(GLCMInfo &gi)
{
float timeElapsed;
if (gi.depth > 64)
GLCMPerGrid(gi, &timeElapsed);
else
GLCMPerBlock(gi, &timeElapsed);
}
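/* Illustrative arithmetic behind the depth > 64 switch above (inferred, not stated in the
   original file): GLCMPerBlock keeps a per-block copy of the GLCM in dynamic shared memory,
   which needs depth * depth * 4 bytes --
       depth =  64  ->  64 *  64 * 4 B = 16 KB (fits the usual 48 KB per-block limit)
       depth = 128  -> 128 * 128 * 4 B = 64 KB (does not)
   so larger depths fall back to GLCMPerGrid, which accumulates directly in global memory. */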
|
93dbf50268d593832b78a5080b715c3b2e30c03f.cu
|
#include <malloc.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <helper_cuda.h>
#include "GLCMCalculationGPU.cuh"
__global__ void GLCMNormalize_kernel(float* glcm, const int R)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
glcm[id] /= R;
}
void GLCMgpu_NormalizeGLCM(GLCMInfo &gi)
{
GLCMNormalize_kernel<<<gi.depth, gi.depth>>>(gi.d_glcm, gi.R);
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
__global__ void GLCMPerGrid_kernel(const unsigned char* intensity, float* glcm, const int rows, const int cols,
const int depth, const int xmin, const int xmax, const int ymin, const int copixel)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int iid = x + y * cols;
if (x >= xmin && x < xmax && y >= ymin && y < rows)
{
unsigned char i = intensity[iid];
unsigned char j = intensity[iid + copixel];
atomicAdd(&(glcm[i * depth + j]), 1);
atomicAdd(&(glcm[j * depth + i]), 1);
}
}
void GLCMPerGrid(GLCMInfo &gi, float* time)
{
const int K = 16;
int xmin = 0, xmax = 0, ymin = 0, copixel = 0;
if (gi.angle == 0) { xmin = 0; xmax = gi.cols - gi.distance; ymin = 0; copixel = 1; }
else if (gi.angle == 45) { xmin = 0; xmax = gi.cols - gi.distance; ymin = gi.distance; copixel = 1 - gi.cols; }
else if (gi.angle == 90) { xmin = 0; xmax = gi.cols; ymin = gi.distance; copixel = -(gi.cols); }
else if (gi.angle == 135) { xmin = gi.distance; xmax = gi.cols; ymin = gi.distance; copixel = -1 - gi.cols; }
else {} // invalid angle -- handle?
dim3 blocks((gi.cols + K - 1) / K, (gi.rows + K - 1) / K);
dim3 threads(K, K);
cudaEvent_t start = 0, stop = 0;
checkCudaErrors(cudaEventCreate(&start, 0));
checkCudaErrors(cudaEventCreate(&stop, 0));
checkCudaErrors(cudaEventRecord(start, 0));
GLCMPerGrid_kernel<<<blocks, threads>>>(gi.d_intensity, gi.d_glcm, gi.rows, gi.cols, gi.depth, xmin, xmax, ymin, copixel);
checkCudaErrors(cudaEventRecord(stop, 0));
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventElapsedTime(time, start, stop));
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
__global__ void GLCMPerBlock_kernel(const unsigned char* intensity, float* glcm, const int rows, const int cols,
const int depth, const int xmin, const int xmax, const int ymin, const int copixel)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int iid = x + y * cols;
int tid = threadIdx.x + threadIdx.y * blockDim.x;
int blocksize = blockDim.x * blockDim.y;
int glcmsize = depth * depth;
extern __shared__ int glcmshared[];
int k = tid;
while (k < glcmsize)
{
glcmshared[k] = 0;
k += blocksize;
}
__syncthreads();
if (x >= xmin && x < xmax && y >= ymin && y < rows)
{
unsigned char i = intensity[iid];
unsigned char j = intensity[iid + copixel];
atomicAdd(&(glcmshared[i * depth + j]), 1);
atomicAdd(&(glcmshared[j * depth + i]), 1);
}
__syncthreads();
k = tid;
while (k < glcmsize)
{
atomicAdd(&(glcm[k]), glcmshared[k]);
k += blocksize;
}
}
void GLCMPerBlock(GLCMInfo &gi, float* time)
{
const int glcmsize = gi.depth * gi.depth * sizeof(float);
const int K = 16;
int xmin = 0, xmax = 0, ymin = 0, copixel = 0;
if (gi.angle == 0) { xmin = 0; xmax = gi.cols - gi.distance; ymin = 0; copixel = 1; }
else if (gi.angle == 45) { xmin = 0; xmax = gi.cols - gi.distance; ymin = gi.distance; copixel = 1 - gi.cols; }
else if (gi.angle == 90) { xmin = 0; xmax = gi.cols; ymin = gi.distance; copixel = -(gi.cols); }
else if (gi.angle == 135) { xmin = gi.distance; xmax = gi.cols; ymin = gi.distance; copixel = -1 - gi.cols; }
else {} // invalid angle -- handle?
dim3 blocks((gi.cols + K - 1) / K, (gi.rows + K - 1) / K);
dim3 threads(K, K);
cudaEvent_t start = 0, stop = 0;
checkCudaErrors(cudaEventCreate(&start, 0));
checkCudaErrors(cudaEventCreate(&stop, 0));
checkCudaErrors(cudaEventRecord(start, 0));
GLCMPerBlock_kernel<<<blocks, threads, glcmsize>>>(gi.d_intensity, gi.d_glcm, gi.rows, gi.cols, gi.depth, xmin, xmax, ymin, copixel);
checkCudaErrors(cudaEventRecord(stop, 0));
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaEventElapsedTime(time, start, stop));
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void GLCMgpu_CalculateGLCM(GLCMInfo &gi)
{
float timeElapsed;
if (gi.depth > 64)
GLCMPerGrid(gi, &timeElapsed);
else
GLCMPerBlock(gi, &timeElapsed);
}
|
1f4c237508afa56835403df15c2df2af72677848.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/limits_gpu.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
// Performs reduction in shared memory
template <int size, typename T>
__device__ void sumInSmem(volatile T* data, const uint tid)
{
T sum = data[tid];
if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) data[tid] = sum = sum + data[tid + 32];
if (size >= 32) data[tid] = sum = sum + data[tid + 16];
if (size >= 16) data[tid] = sum = sum + data[tid + 8];
if (size >= 8) data[tid] = sum = sum + data[tid + 4];
if (size >= 4) data[tid] = sum = sum + data[tid + 2];
if (size >= 2) data[tid] = sum = sum + data[tid + 1];
}
}
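    /* A minimal usage sketch (not part of the original file): one 256-thread block summing one
       element per thread with the helper above; the volatile qualifier is what keeps the
       unsynchronised warp-level tail of sumInSmem correct on the architectures this code targets.
           template <typename T>
           __global__ void blockSumSketch(const T* in, T* out)
           {
               __shared__ T sdata[256];
               const uint tid = threadIdx.x;
               sdata[tid] = in[blockIdx.x * 256 + tid];
               __syncthreads();
               sumInSmem<256, T>(sdata, tid);
               if (tid == 0) out[blockIdx.x] = sdata[0]; // one partial sum per block
           }
    */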
struct Mask8U
{
explicit Mask8U(PtrStep mask): mask(mask) {}
__device__ __forceinline__ bool operator()(int y, int x) const
{
return mask.ptr(y)[x];
}
PtrStep mask;
};
struct MaskTrue
{
__device__ __forceinline__ bool operator()(int y, int x) const
{
return true;
}
};
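    /* The two predicates above let one kernel template serve both the masked and the unmasked
       paths: the callers further down instantiate minMaxKernel<256, T, Mask8U> with Mask8U(mask)
       to honour an 8-bit mask, and minMaxKernel<256, T, MaskTrue> with MaskTrue() to visit every
       pixel, so for MaskTrue the inlined test can be optimised away entirely. */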
//////////////////////////////////////////////////////////////////////////////
// Min max
    // To avoid shared memory bank conflicts we convert each value to a type of
    // appropriate width (at least 32 bits)
template <typename T> struct MinMaxTypeTraits {};
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
namespace minmax
{
__constant__ int ctwidth;
__constant__ int ctheight;
        // Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates good thread configuration
        // - the threads variable satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * elem_size;
bufrows = 2;
}
        // Computes the device constants used in the kernels, given the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
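        /* Worked example with an illustrative 1920 x 1080 input (not from the original file):
               estimateThreadCfg: threads = (32, 8); threads.x * 8 = threads.y * 32 = 256, so
                                  grid = (divUp(1920, 256), divUp(1080, 256)) = (8, 5), well under
                                  the (32, 8) clamp -- 40 blocks of 256 threads in total;
               setKernelConsts:   ctwidth  = divUp(divUp(1920, 8), 32) = 8
                                  ctheight = divUp(divUp(1080, 5), 8)  = 27
           so in the kernels below each thread strides over at most 8 x 27 = 216 samples. */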
// Does min and max in shared memory
template <typename T>
__device__ __forceinline__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
{
minval[tid] = min(minval[tid], minval[tid + offset]);
maxval[tid] = max(maxval[tid], maxval[tid + offset]);
}
template <int size, typename T>
__device__ void findMinMaxInSmem(volatile T* minval, volatile T* maxval, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval);
if (size >= 32) merge(tid, 16, minval, maxval);
if (size >= 16) merge(tid, 8, minval, maxval);
if (size >= 8) merge(tid, 4, minval, maxval);
if (size >= 4) merge(tid, 2, minval, maxval);
if (size >= 2) merge(tid, 1, minval, maxval);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxKernel(const DevMem2D src, Mask mask, T* minval, T* maxval)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits_gpu<T>::max();
T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() : numeric_limits_gpu<T>::min();
uint y_end = min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* src_row = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
T val = src_row[x];
if (mask(y, x))
{
mymin = min(mymin, val);
mymax = max(mymax, val);
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#endif
}
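        /* The single-pass finalisation used above, shown in isolation as a sketch (it assumes each
           block has already written its partial result to partials[blockIdx.x]):
               __device__ uint sketch_blocks_done = 0;
               __global__ void lastBlockFoldsSketch(float* partials, float* result)
               {
                   __shared__ bool is_last;
                   if (threadIdx.x == 0)
                   {
                       __threadfence(); // make this block's partials[] write visible to other blocks
                       uint ticket = atomicInc(&sketch_blocks_done, gridDim.x);
                       is_last = (ticket == gridDim.x - 1); // true only in the last block to arrive
                   }
                   __syncthreads();
                   if (is_last && threadIdx.x == 0)
                   {
                       float total = 0.f;
                       for (uint i = 0; i < gridDim.x; ++i) total += partials[i];
                       result[0] = total;
                       sketch_blocks_done = 0; // reset the ticket counter for the next launch
                   }
               }
        */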
template <typename T>
void minMaxMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template <typename T>
void minMaxCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxCaller<uchar>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<char>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<ushort>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<short>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<int>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<float>(const DevMem2D, double*,double*, PtrStep);
template void minMaxCaller<double>(const DevMem2D, double*, double*, PtrStep);
template <int nthreads, typename T>
__global__ void minMaxPass2Kernel(T* minval, T* maxval, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
}
}
template <typename T>
void minMaxMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall(hipDeviceSynchronize());
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template <typename T>
void minMaxMultipassCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
hipLaunchKernelGGL(( minMaxKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMultipassCaller<uchar>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<char>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<ushort>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<short>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<int>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<float>(const DevMem2D, double*, double*, PtrStep);
} // namespace minmax
///////////////////////////////////////////////////////////////////////////////
// minMaxLoc
namespace minmaxloc {
__constant__ int ctwidth;
__constant__ int ctheight;
        // Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates good thread configuration
        // - the threads variable satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols,
int& b1rows, int& b2cols, int& b2rows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
b1cols = grid.x * grid.y * elem_size; // For values
b1rows = 2;
b2cols = grid.x * grid.y * sizeof(int); // For locations
b2rows = 2;
}
// Estimates device constants which are used in the kernels using specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
template <typename T>
__device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval,
volatile uint* minloc, volatile uint* maxloc)
{
T val = minval[tid + offset];
if (val < minval[tid])
{
minval[tid] = val;
minloc[tid] = minloc[tid + offset];
}
val = maxval[tid + offset];
if (val > maxval[tid])
{
maxval[tid] = val;
maxloc[tid] = maxloc[tid + offset];
}
}
template <int size, typename T>
__device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc,
volatile uint* maxloc, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval, minloc, maxloc);
if (size >= 32) merge(tid, 16, minval, maxval, minloc, maxloc);
if (size >= 16) merge(tid, 8, minval, maxval, minloc, maxloc);
if (size >= 8) merge(tid, 4, minval, maxval, minloc, maxloc);
if (size >= 4) merge(tid, 2, minval, maxval, minloc, maxloc);
if (size >= 2) merge(tid, 1, minval, maxval, minloc, maxloc);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxLocKernel(const DevMem2D src, Mask mask, T* minval, T* maxval,
uint* minloc, uint* maxloc)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits_gpu<T>::max();
T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() :
numeric_limits_gpu<T>::min();
uint myminloc = 0;
uint mymaxloc = 0;
uint y_end = min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* ptr = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
if (mask(y, x))
{
T val = ptr[x];
if (val <= mymin) { mymin = val; myminloc = y * src.cols + x; }
if (val >= mymax) { mymax = val; mymaxloc = y * src.cols + x; }
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
sminloc[tid] = myminloc;
smaxloc[tid] = mymaxloc;
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
}
#endif
}
template <typename T>
void minMaxLocMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall( hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost) );
cudaSafeCall( hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost) );
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template <typename T>
void minMaxLocCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<double>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
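        /* Location bookkeeping in the callers above, as arithmetic (illustrative numbers): the
           kernels flatten a coordinate as idx = y * src.cols + x and the callers invert it with
           y = idx / cols, x = idx - y * cols; e.g. cols = 640, idx = 1931 gives y = 3, x = 11. */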
// This kernel will be used only when compute capability is 1.0
template <int nthreads, typename T>
__global__ void minMaxLocPass2Kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
}
}
template <typename T>
void minMaxLocMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, Mask8U>), dim3(grid), dim3(threads), 0, 0, src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxLocPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template <typename T>
void minMaxLocMultipassCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
hipLaunchKernelGGL(( minMaxLocKernel<256, T, MaskTrue>), dim3(grid), dim3(threads), 0, 0, src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( minMaxLocPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(hipMemcpy(&minval_, minval_buf, sizeof(T), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxval_, maxval_buf, sizeof(T), hipMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(hipMemcpy(&minloc_, minloc_buf, sizeof(int), hipMemcpyDeviceToHost));
cudaSafeCall(hipMemcpy(&maxloc_, maxloc_buf, sizeof(int), hipMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMultipassCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
} // namespace minmaxloc
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// countNonZero
namespace countnonzero
{
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <int nthreads, typename T>
__global__ void countNonZeroKernel(const DevMem2D src, volatile uint* count)
{
__shared__ uint scount[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint cnt = 0;
for (uint y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (uint x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
cnt += ptr[x0 + x * blockDim.x] != 0;
}
scount[tid] = cnt;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
{
count[0] = scount[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
#endif
}
template <typename T>
int countNonZeroCaller(const DevMem2D src, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
hipLaunchKernelGGL(( countNonZeroKernel<256, T>), dim3(grid), dim3(threads), 0, 0, src, count_buf);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
uint count;
cudaSafeCall(hipMemcpy(&count, count_buf, sizeof(int), hipMemcpyDeviceToHost));
return count;
}
template int countNonZeroCaller<uchar>(const DevMem2D, PtrStep);
template int countNonZeroCaller<char>(const DevMem2D, PtrStep);
template int countNonZeroCaller<ushort>(const DevMem2D, PtrStep);
template int countNonZeroCaller<short>(const DevMem2D, PtrStep);
template int countNonZeroCaller<int>(const DevMem2D, PtrStep);
template int countNonZeroCaller<float>(const DevMem2D, PtrStep);
template int countNonZeroCaller<double>(const DevMem2D, PtrStep);
template <int nthreads, typename T>
__global__ void countNonZeroPass2Kernel(uint* count, int size)
{
__shared__ uint scount[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
scount[tid] = tid < size ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
count[0] = scount[0];
}
template <typename T>
int countNonZeroMultipassCaller(const DevMem2D src, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
hipLaunchKernelGGL(( countNonZeroKernel<256, T>), dim3(grid), dim3(threads), 0, 0, src, count_buf);
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( countNonZeroPass2Kernel<256, T>), dim3(1), dim3(256), 0, 0, count_buf, grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
uint count;
cudaSafeCall(hipMemcpy(&count, count_buf, sizeof(int), hipMemcpyDeviceToHost));
return count;
}
template int countNonZeroMultipassCaller<uchar>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<char>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<ushort>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<short>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<int>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<float>(const DevMem2D, PtrStep);
} // namespace countnonzero
//////////////////////////////////////////////////////////////////////////
// Sum
namespace sums
{
template <typename T> struct SumType {};
template <> struct SumType<uchar> { typedef uint R; };
template <> struct SumType<char> { typedef int R; };
template <> struct SumType<ushort> { typedef uint R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
template <typename R>
struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };
template <typename R>
struct AbsOp { static __device__ __forceinline__ R call(R x) { return abs(x); } };
template <>
struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };
template <typename R>
struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };
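        /* The Op template parameter is what differentiates the reductions built on sumKernel below:
           IdentityOp gives the plain sum, AbsOp the absolute-value sum, and SqrOp presumably serves
           a squared-sum caller later in the file. Sketch (single channel, T = float so R = float;
           CUDA launch syntax shown for brevity):
               sumKernel<float, float, IdentityOp<float>, 256><<<grid, threads>>>(src, (float*)buf.ptr(0));
               sumKernel<float, float, AbsOp<float>,      256><<<grid, threads>>>(src, (float*)buf.ptr(0));
               sumKernel<float, float, SqrOp<float>,      256><<<grid, threads>>>(src, (float*)buf.ptr(0));
           as sumCaller and absSumMultipassCaller do further down with threads_x * threads_y = 256. */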
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
const int threads_x = 32;
const int threads_y = 8;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, threads.x * threads.y),
divUp(rows, threads.y * threads.x));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(hipMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(hipMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel(const DevMem2D src, R* result)
{
__shared__ R smem[nthreads];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
R sum = 0;
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
sum += Op::call(ptr[x0 + x * blockDim.x]);
}
smem[tid] = sum;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = smem[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
smem[tid] = tid < gridDim.x * gridDim.y ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
{
result[0] = smem[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) result[bid] = smem[0];
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel(R* result, int size)
{
__shared__ R smem[nthreads];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
smem[tid] = tid < size ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
result[0] = smem[0];
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C2(const DevMem2D src, typename TypeVec<R, 2>::vec_t* result)
{
typedef typename TypeVec<T, 2>::vec_t SrcType;
typedef typename TypeVec<R, 2>::vec_t DstType;
__shared__ R smem[nthreads * 2];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C2(typename TypeVec<R, 2>::vec_t* result, int size)
{
typedef typename TypeVec<R, 2>::vec_t DstType;
__shared__ R smem[nthreads * 2];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C3(const DevMem2D src, typename TypeVec<R, 3>::vec_t* result)
{
typedef typename TypeVec<T, 3>::vec_t SrcType;
typedef typename TypeVec<R, 3>::vec_t DstType;
__shared__ R smem[nthreads * 3];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y), Op::call(val.z));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C3(typename TypeVec<R, 3>::vec_t* result, int size)
{
typedef typename TypeVec<R, 3>::vec_t DstType;
__shared__ R smem[nthreads * 3];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C4(const DevMem2D src, typename TypeVec<R, 4>::vec_t* result)
{
typedef typename TypeVec<T, 4>::vec_t SrcType;
typedef typename TypeVec<R, 4>::vec_t DstType;
__shared__ R smem[nthreads * 4];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y),
Op::call(val.z), Op::call(val.w));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
smem[tid + 3 * nthreads] = sum.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C4(typename TypeVec<R, 4>::vec_t* result, int size)
{
typedef typename TypeVec<R, 4>::vec_t DstType;
__shared__ R smem[nthreads * 4];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
}
}
} // namespace sum
template <typename T>
void sumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(&result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
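// A minimal host-side usage sketch (hypothetical; assumes `src` is already on the
// device and `buf` was sized with sums::getBufSizeRequired and allocated by the caller):
//   double s[4] = {0, 0, 0, 0};
//   sumCaller<uchar>(src, buf, s, cn);   // s[0..cn-1] now hold the per-channel sums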
template <typename T>
void sumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(&result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void absSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void absSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void sqrSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C2<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C3<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( hipGetLastError() );
hipLaunchKernelGGL(( sumPass2Kernel_C4<T, R, threads_x * threads_y>), dim3(1), dim3(threads_x * threads_y), 0, 0,
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( hipGetLastError() );
break;
}
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void sqrSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
hipLaunchKernelGGL(( sumKernel<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
hipLaunchKernelGGL(( sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
hipLaunchKernelGGL(( sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
hipLaunchKernelGGL(( sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y>), dim3(grid), dim3(threads), 0, 0,
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( hipGetLastError() );
cudaSafeCall( hipDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(hipMemcpy(result, buf.ptr(0), sizeof(R) * cn, hipMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<float>(const DevMem2D, PtrStep, double*, int);
}}}
|
1f4c237508afa56835403df15c2df2af72677848.cu
|
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "opencv2/gpu/device/limits_gpu.hpp"
#include "opencv2/gpu/device/saturate_cast.hpp"
#include "opencv2/gpu/device/vecmath.hpp"
#include "opencv2/gpu/device/transform.hpp"
#include "internal_shared.hpp"
using namespace cv::gpu;
using namespace cv::gpu::device;
namespace cv { namespace gpu { namespace mathfunc
{
// Performs reduction in shared memory
template <int size, typename T>
__device__ void sumInSmem(volatile T* data, const uint tid)
{
T sum = data[tid];
if (size >= 512) { if (tid < 256) { data[tid] = sum = sum + data[tid + 256]; } __syncthreads(); }
if (size >= 256) { if (tid < 128) { data[tid] = sum = sum + data[tid + 128]; } __syncthreads(); }
if (size >= 128) { if (tid < 64) { data[tid] = sum = sum + data[tid + 64]; } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) data[tid] = sum = sum + data[tid + 32];
if (size >= 32) data[tid] = sum = sum + data[tid + 16];
if (size >= 16) data[tid] = sum = sum + data[tid + 8];
if (size >= 8) data[tid] = sum = sum + data[tid + 4];
if (size >= 4) data[tid] = sum = sum + data[tid + 2];
if (size >= 2) data[tid] = sum = sum + data[tid + 1];
}
}
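// Example: a 256-thread block reduces its per-thread partial values with
//   sumInSmem<256, uint>(scount, tid);
// as done in countNonZeroKernel and the sum kernels below.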
struct Mask8U
{
explicit Mask8U(PtrStep mask): mask(mask) {}
__device__ __forceinline__ bool operator()(int y, int x) const
{
return mask.ptr(y)[x];
}
PtrStep mask;
};
struct MaskTrue
{
__device__ __forceinline__ bool operator()(int y, int x) const
{
return true;
}
};
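// Mask8U treats any non-zero mask byte as "selected", while MaskTrue accepts every
// pixel; passing one of them as a template parameter lets a single kernel serve both
// the masked and unmasked variants (see the minMaxKernel instantiations below).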
//////////////////////////////////////////////////////////////////////////////
// Min max
// To avoid shared-memory bank conflicts, each value is converted to a type of
// appropriate width (at least 32 bits) before the reduction
template <typename T> struct MinMaxTypeTraits {};
template <> struct MinMaxTypeTraits<uchar> { typedef int best_type; };
template <> struct MinMaxTypeTraits<char> { typedef int best_type; };
template <> struct MinMaxTypeTraits<ushort> { typedef int best_type; };
template <> struct MinMaxTypeTraits<short> { typedef int best_type; };
template <> struct MinMaxTypeTraits<int> { typedef int best_type; };
template <> struct MinMaxTypeTraits<float> { typedef float best_type; };
template <> struct MinMaxTypeTraits<double> { typedef double best_type; };
namespace minmax
{
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads variable satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
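// Worked example: for a 1024x768 image, threads = (32, 8) and
// grid = (min(divUp(1024, 256), 32), min(divUp(768, 256), 8)) = (4, 3),
// i.e. 12 blocks of 256 threads, each covering a 256x256 pixel tile.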
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * elem_size;
bufrows = 2;
}
// Computes the device constants used by the kernels for the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
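// Continuing the example above: ctwidth = divUp(divUp(1024, 4), 32) = 8 and
// ctheight = divUp(divUp(768, 3), 8) = 32, so each thread visits 8 columns and
// 32 rows, strided by blockDim.x and blockDim.y respectively.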
// Merges min and max candidates in shared memory
template <typename T>
__device__ __forceinline__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval)
{
minval[tid] = min(minval[tid], minval[tid + offset]);
maxval[tid] = max(maxval[tid], maxval[tid + offset]);
}
template <int size, typename T>
__device__ void findMinMaxInSmem(volatile T* minval, volatile T* maxval, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval);
if (size >= 32) merge(tid, 16, minval, maxval);
if (size >= 16) merge(tid, 8, minval, maxval);
if (size >= 8) merge(tid, 4, minval, maxval);
if (size >= 4) merge(tid, 2, minval, maxval);
if (size >= 2) merge(tid, 1, minval, maxval);
}
}
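// Note: the final 32-element tail is merged without __syncthreads(); the code relies
// on the volatile qualifiers plus the implicit warp-synchronous execution of the
// pre-Volta GPUs this file was written for.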
template <int nthreads, typename T, typename Mask>
__global__ void minMaxKernel(const DevMem2D src, Mask mask, T* minval, T* maxval)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits_gpu<T>::max();
T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() : numeric_limits_gpu<T>::min();
uint y_end = min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* src_row = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
T val = src_row[x];
if (mask(y, x))
{
mymin = min(mymin, val);
mymax = max(mymax, val);
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
}
#endif
}
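// On compute capability >= 1.1 the kernel finalizes the reduction itself: every block
// publishes its partial min/max, atomicInc on blocks_finished hands out tickets, and
// the last block to finish re-reduces the per-block results into element 0 and resets
// the counter. On CC 1.0 only the per-block results are written and the multipass
// callers run minMaxPass2Kernel to complete the reduction.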
template <typename T>
void minMaxMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template <typename T>
void minMaxCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxCaller<uchar>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<char>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<ushort>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<short>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<int>(const DevMem2D, double*, double*, PtrStep);
template void minMaxCaller<float>(const DevMem2D, double*,double*, PtrStep);
template void minMaxCaller<double>(const DevMem2D, double*, double*, PtrStep);
template <int nthreads, typename T>
__global__ void minMaxPass2Kernel(T* minval, T* maxval, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
__syncthreads();
findMinMaxInSmem<nthreads, best_type>(sminval, smaxval, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
}
}
template <typename T>
void minMaxMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall(cudaDeviceSynchronize());
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template void minMaxMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, PtrStep);
template <typename T>
void minMaxMultipassCaller(const DevMem2D src, double* minval, double* maxval, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)buf.ptr(0);
T* maxval_buf = (T*)buf.ptr(1);
minMaxKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf);
cudaSafeCall( cudaGetLastError() );
minMaxPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
}
template void minMaxMultipassCaller<uchar>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<char>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<ushort>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<short>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<int>(const DevMem2D, double*, double*, PtrStep);
template void minMaxMultipassCaller<float>(const DevMem2D, double*, double*, PtrStep);
} // namespace minmax
///////////////////////////////////////////////////////////////////////////////
// minMaxLoc
namespace minmaxloc {
__constant__ int ctwidth;
__constant__ int ctheight;
// Global counter of blocks that have finished their work
__device__ uint blocks_finished = 0;
// Estimates a good thread configuration
// - the threads variable satisfies threads.x * threads.y == 256
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
// Returns required buffer sizes
void getBufSizeRequired(int cols, int rows, int elem_size, int& b1cols,
int& b1rows, int& b2cols, int& b2rows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
b1cols = grid.x * grid.y * elem_size; // For values
b1rows = 2;
b2cols = grid.x * grid.y * sizeof(int); // For locations
b2rows = 2;
}
// Computes the device constants used by the kernels for the specified thread configuration
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(ctwidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(ctheight)));
}
template <typename T>
__device__ void merge(uint tid, uint offset, volatile T* minval, volatile T* maxval,
volatile uint* minloc, volatile uint* maxloc)
{
T val = minval[tid + offset];
if (val < minval[tid])
{
minval[tid] = val;
minloc[tid] = minloc[tid + offset];
}
val = maxval[tid + offset];
if (val > maxval[tid])
{
maxval[tid] = val;
maxloc[tid] = maxloc[tid + offset];
}
}
template <int size, typename T>
__device__ void findMinMaxLocInSmem(volatile T* minval, volatile T* maxval, volatile uint* minloc,
volatile uint* maxloc, const uint tid)
{
if (size >= 512) { if (tid < 256) { merge(tid, 256, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 256) { if (tid < 128) { merge(tid, 128, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (size >= 128) { if (tid < 64) { merge(tid, 64, minval, maxval, minloc, maxloc); } __syncthreads(); }
if (tid < 32)
{
if (size >= 64) merge(tid, 32, minval, maxval, minloc, maxloc);
if (size >= 32) merge(tid, 16, minval, maxval, minloc, maxloc);
if (size >= 16) merge(tid, 8, minval, maxval, minloc, maxloc);
if (size >= 8) merge(tid, 4, minval, maxval, minloc, maxloc);
if (size >= 4) merge(tid, 2, minval, maxval, minloc, maxloc);
if (size >= 2) merge(tid, 1, minval, maxval, minloc, maxloc);
}
}
template <int nthreads, typename T, typename Mask>
__global__ void minMaxLocKernel(const DevMem2D src, Mask mask, T* minval, T* maxval,
uint* minloc, uint* maxloc)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
T mymin = numeric_limits_gpu<T>::max();
T mymax = numeric_limits_gpu<T>::is_signed ? -numeric_limits_gpu<T>::max() :
numeric_limits_gpu<T>::min();
uint myminloc = 0;
uint mymaxloc = 0;
uint y_end = min(y0 + (ctheight - 1) * blockDim.y + 1, src.rows);
uint x_end = min(x0 + (ctwidth - 1) * blockDim.x + 1, src.cols);
for (uint y = y0; y < y_end; y += blockDim.y)
{
const T* ptr = (const T*)src.ptr(y);
for (uint x = x0; x < x_end; x += blockDim.x)
{
if (mask(y, x))
{
T val = ptr[x];
if (val <= mymin) { mymin = val; myminloc = y * src.cols + x; }
if (val >= mymax) { mymax = val; mymaxloc = y * src.cols + x; }
}
}
}
sminval[tid] = mymin;
smaxval[tid] = mymax;
sminloc[tid] = myminloc;
smaxloc[tid] = mymaxloc;
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
uint idx = min(tid, gridDim.x * gridDim.y - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
minval[blockIdx.y * gridDim.x + blockIdx.x] = (T)sminval[0];
maxval[blockIdx.y * gridDim.x + blockIdx.x] = (T)smaxval[0];
minloc[blockIdx.y * gridDim.x + blockIdx.x] = sminloc[0];
maxloc[blockIdx.y * gridDim.x + blockIdx.x] = smaxloc[0];
}
#endif
}
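// Because the per-thread scan updates on <= and >=, ties are resolved in favour of the
// last matching pixel visited by that thread, so the reported location is a valid
// extremum position but not necessarily the first one in row-major order.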
template <typename T>
void minMaxLocMaskCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall( cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost) );
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall( cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost) );
cudaSafeCall( cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost) );
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskCaller<double>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template <typename T>
void minMaxLocCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocCaller<double>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
// This kernel is used only when the compute capability is 1.0
template <int nthreads, typename T>
__global__ void minMaxLocPass2Kernel(T* minval, T* maxval, uint* minloc, uint* maxloc, int size)
{
typedef typename MinMaxTypeTraits<T>::best_type best_type;
__shared__ best_type sminval[nthreads];
__shared__ best_type smaxval[nthreads];
__shared__ uint sminloc[nthreads];
__shared__ uint smaxloc[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint idx = min(tid, size - 1);
sminval[tid] = minval[idx];
smaxval[tid] = maxval[idx];
sminloc[tid] = minloc[idx];
smaxloc[tid] = maxloc[idx];
__syncthreads();
findMinMaxLocInSmem<nthreads, best_type>(sminval, smaxval, sminloc, smaxloc, tid);
if (tid == 0)
{
minval[0] = (T)sminval[0];
maxval[0] = (T)smaxval[0];
minloc[0] = sminloc[0];
maxloc[0] = smaxloc[0];
}
}
template <typename T>
void minMaxLocMaskMultipassCaller(const DevMem2D src, const PtrStep mask, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, Mask8U><<<grid, threads>>>(src, Mask8U(mask), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMaskMultipassCaller<uchar>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<char>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<ushort>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<short>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<int>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMaskMultipassCaller<float>(const DevMem2D, const PtrStep, double*, double*, int[2], int[2], PtrStep, PtrStep);
template <typename T>
void minMaxLocMultipassCaller(const DevMem2D src, double* minval, double* maxval,
int minloc[2], int maxloc[2], PtrStep valbuf, PtrStep locbuf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
T* minval_buf = (T*)valbuf.ptr(0);
T* maxval_buf = (T*)valbuf.ptr(1);
uint* minloc_buf = (uint*)locbuf.ptr(0);
uint* maxloc_buf = (uint*)locbuf.ptr(1);
minMaxLocKernel<256, T, MaskTrue><<<grid, threads>>>(src, MaskTrue(), minval_buf, maxval_buf,
minloc_buf, maxloc_buf);
cudaSafeCall( cudaGetLastError() );
minMaxLocPass2Kernel<256, T><<<1, 256>>>(minval_buf, maxval_buf, minloc_buf, maxloc_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
T minval_, maxval_;
cudaSafeCall(cudaMemcpy(&minval_, minval_buf, sizeof(T), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxval_, maxval_buf, sizeof(T), cudaMemcpyDeviceToHost));
*minval = minval_;
*maxval = maxval_;
uint minloc_, maxloc_;
cudaSafeCall(cudaMemcpy(&minloc_, minloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
cudaSafeCall(cudaMemcpy(&maxloc_, maxloc_buf, sizeof(int), cudaMemcpyDeviceToHost));
minloc[1] = minloc_ / src.cols; minloc[0] = minloc_ - minloc[1] * src.cols;
maxloc[1] = maxloc_ / src.cols; maxloc[0] = maxloc_ - maxloc[1] * src.cols;
}
template void minMaxLocMultipassCaller<uchar>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<char>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<ushort>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<short>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<int>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
template void minMaxLocMultipassCaller<float>(const DevMem2D, double*, double*, int[2], int[2], PtrStep, PtrStep);
} // namespace minmaxloc
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// countNonZero
namespace countnonzero
{
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(32, 8);
grid = dim3(divUp(cols, threads.x * 8), divUp(rows, threads.y * 32));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(int);
bufrows = 1;
}
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <int nthreads, typename T>
__global__ void countNonZeroKernel(const DevMem2D src, volatile uint* count)
{
__shared__ uint scount[nthreads];
uint x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
uint y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
uint cnt = 0;
for (uint y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (uint x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
cnt += ptr[x0 + x * blockDim.x] != 0;
}
scount[tid] = cnt;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = ticket == gridDim.x * gridDim.y - 1;
}
__syncthreads();
if (is_last)
{
scount[tid] = tid < gridDim.x * gridDim.y ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
{
count[0] = scount[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) count[blockIdx.y * gridDim.x + blockIdx.x] = scount[0];
#endif
}
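// The comparison ptr[...] != 0 is accumulated directly as 0/1, so counting non-zeros
// reduces to the same shared-memory summation (sumInSmem) used by the other kernels.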
template <typename T>
int countNonZeroCaller(const DevMem2D src, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
uint count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));
return count;
}
template int countNonZeroCaller<uchar>(const DevMem2D, PtrStep);
template int countNonZeroCaller<char>(const DevMem2D, PtrStep);
template int countNonZeroCaller<ushort>(const DevMem2D, PtrStep);
template int countNonZeroCaller<short>(const DevMem2D, PtrStep);
template int countNonZeroCaller<int>(const DevMem2D, PtrStep);
template int countNonZeroCaller<float>(const DevMem2D, PtrStep);
template int countNonZeroCaller<double>(const DevMem2D, PtrStep);
template <int nthreads, typename T>
__global__ void countNonZeroPass2Kernel(uint* count, int size)
{
__shared__ uint scount[nthreads];
uint tid = threadIdx.y * blockDim.x + threadIdx.x;
scount[tid] = tid < size ? count[tid] : 0;
__syncthreads();
sumInSmem<nthreads, uint>(scount, tid);
if (tid == 0)
count[0] = scount[0];
}
template <typename T>
int countNonZeroMultipassCaller(const DevMem2D src, PtrStep buf)
{
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
uint* count_buf = (uint*)buf.ptr(0);
countNonZeroKernel<256, T><<<grid, threads>>>(src, count_buf);
cudaSafeCall( cudaGetLastError() );
countNonZeroPass2Kernel<256, T><<<1, 256>>>(count_buf, grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
uint count;
cudaSafeCall(cudaMemcpy(&count, count_buf, sizeof(int), cudaMemcpyDeviceToHost));
return count;
}
template int countNonZeroMultipassCaller<uchar>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<char>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<ushort>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<short>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<int>(const DevMem2D, PtrStep);
template int countNonZeroMultipassCaller<float>(const DevMem2D, PtrStep);
} // namespace countnonzero
//////////////////////////////////////////////////////////////////////////
// Sum
namespace sums
{
template <typename T> struct SumType {};
template <> struct SumType<uchar> { typedef uint R; };
template <> struct SumType<char> { typedef int R; };
template <> struct SumType<ushort> { typedef uint R; };
template <> struct SumType<short> { typedef int R; };
template <> struct SumType<int> { typedef int R; };
template <> struct SumType<float> { typedef float R; };
template <> struct SumType<double> { typedef double R; };
template <typename R>
struct IdentityOp { static __device__ __forceinline__ R call(R x) { return x; } };
template <typename R>
struct AbsOp { static __device__ __forceinline__ R call(R x) { return abs(x); } };
template <>
struct AbsOp<uint> { static __device__ __forceinline__ uint call(uint x) { return x; } };
template <typename R>
struct SqrOp { static __device__ __forceinline__ R call(R x) { return x * x; } };
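// IdentityOp, AbsOp and SqrOp let one kernel body implement sum, absSum and sqrSum;
// AbsOp<uint> is specialized to a no-op because uchar/ushort sums accumulate into an
// unsigned type, where abs() would be meaningless.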
__constant__ int ctwidth;
__constant__ int ctheight;
__device__ uint blocks_finished = 0;
const int threads_x = 32;
const int threads_y = 8;
void estimateThreadCfg(int cols, int rows, dim3& threads, dim3& grid)
{
threads = dim3(threads_x, threads_y);
grid = dim3(divUp(cols, threads.x * threads.y),
divUp(rows, threads.y * threads.x));
grid.x = min(grid.x, threads.x);
grid.y = min(grid.y, threads.y);
}
void getBufSizeRequired(int cols, int rows, int cn, int& bufcols, int& bufrows)
{
dim3 threads, grid;
estimateThreadCfg(cols, rows, threads, grid);
bufcols = grid.x * grid.y * sizeof(double) * cn;
bufrows = 1;
}
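// The scratch buffer is sized for the worst case: grid.x * grid.y is capped at
// 32 * 8 = 256 blocks, and sizeof(double) covers every accumulator type R, so one
// buffer works for all source depths and up to four channels.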
void setKernelConsts(int cols, int rows, const dim3& threads, const dim3& grid)
{
int twidth = divUp(divUp(cols, grid.x), threads.x);
int theight = divUp(divUp(rows, grid.y), threads.y);
cudaSafeCall(cudaMemcpyToSymbol(ctwidth, &twidth, sizeof(twidth)));
cudaSafeCall(cudaMemcpyToSymbol(ctheight, &theight, sizeof(theight)));
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel(const DevMem2D src, R* result)
{
__shared__ R smem[nthreads];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
R sum = 0;
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const T* ptr = (const T*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
sum += Op::call(ptr[x0 + x * blockDim.x]);
}
smem[tid] = sum;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
result[bid] = smem[0];
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
smem[tid] = tid < gridDim.x * gridDim.y ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
{
result[0] = smem[0];
blocks_finished = 0;
}
}
#else
if (tid == 0) result[bid] = smem[0];
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel(R* result, int size)
{
__shared__ R smem[nthreads];
int tid = threadIdx.y * blockDim.x + threadIdx.x;
smem[tid] = tid < size ? result[tid] : 0;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
if (tid == 0)
result[0] = smem[0];
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C2(const DevMem2D src, typename TypeVec<R, 2>::vec_t* result)
{
typedef typename TypeVec<T, 2>::vec_t SrcType;
typedef typename TypeVec<R, 2>::vec_t DstType;
__shared__ R smem[nthreads * 2];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C2(typename TypeVec<R, 2>::vec_t* result, int size)
{
typedef typename TypeVec<R, 2>::vec_t DstType;
__shared__ R smem[nthreads * 2];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C3(const DevMem2D src, typename TypeVec<R, 3>::vec_t* result)
{
typedef typename TypeVec<T, 3>::vec_t SrcType;
typedef typename TypeVec<R, 3>::vec_t DstType;
__shared__ R smem[nthreads * 3];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y), Op::call(val.z));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C3(typename TypeVec<R, 3>::vec_t* result, int size)
{
typedef typename TypeVec<R, 3>::vec_t DstType;
__shared__ R smem[nthreads * 3];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
result[0] = res;
}
}
template <typename T, typename R, typename Op, int nthreads>
__global__ void sumKernel_C4(const DevMem2D src, typename TypeVec<R, 4>::vec_t* result)
{
typedef typename TypeVec<T, 4>::vec_t SrcType;
typedef typename TypeVec<R, 4>::vec_t DstType;
__shared__ R smem[nthreads * 4];
const int x0 = blockIdx.x * blockDim.x * ctwidth + threadIdx.x;
const int y0 = blockIdx.y * blockDim.y * ctheight + threadIdx.y;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int bid = blockIdx.y * gridDim.x + blockIdx.x;
SrcType val;
DstType sum = VecTraits<DstType>::all(0);
for (int y = 0; y < ctheight && y0 + y * blockDim.y < src.rows; ++y)
{
const SrcType* ptr = (const SrcType*)src.ptr(y0 + y * blockDim.y);
for (int x = 0; x < ctwidth && x0 + x * blockDim.x < src.cols; ++x)
{
val = ptr[x0 + x * blockDim.x];
sum = sum + VecTraits<DstType>::make(Op::call(val.x), Op::call(val.y),
Op::call(val.z), Op::call(val.w));
}
}
smem[tid] = sum.x;
smem[tid + nthreads] = sum.y;
smem[tid + 2 * nthreads] = sum.z;
smem[tid + 3 * nthreads] = sum.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 110
__shared__ bool is_last;
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
__threadfence();
uint ticket = atomicInc(&blocks_finished, gridDim.x * gridDim.y);
is_last = (ticket == gridDim.x * gridDim.y - 1);
}
__syncthreads();
if (is_last)
{
DstType res = tid < gridDim.x * gridDim.y ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
blocks_finished = 0;
}
}
#else
if (tid == 0)
{
DstType res;
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[bid] = res;
}
#endif
}
template <typename T, typename R, int nthreads>
__global__ void sumPass2Kernel_C4(typename TypeVec<R, 4>::vec_t* result, int size)
{
typedef typename TypeVec<R, 4>::vec_t DstType;
__shared__ R smem[nthreads * 4];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
DstType res = tid < size ? result[tid] : VecTraits<DstType>::all(0);
smem[tid] = res.x;
smem[tid + nthreads] = res.y;
smem[tid + 2 * nthreads] = res.z;
smem[tid + 3 * nthreads] = res.w;
__syncthreads();
sumInSmem<nthreads, R>(smem, tid);
sumInSmem<nthreads, R>(smem + nthreads, tid);
sumInSmem<nthreads, R>(smem + 2 * nthreads, tid);
sumInSmem<nthreads, R>(smem + 3 * nthreads, tid);
if (tid == 0)
{
res.x = smem[0];
res.y = smem[nthreads];
res.z = smem[2 * nthreads];
res.w = smem[3 * nthreads];
result[0] = res;
}
}
} // namespace sums
template <typename T>
void sumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
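// Single-pass callers: on devices with global atomics (the __CUDA_ARCH__ >= 110 path
// in the kernels above) the last block to finish, tracked via the blocks_finished
// ticket, folds all per-block partial sums itself, so no pass-2 kernel is launched.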
template <typename T>
void sumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, IdentityOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sumCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void absSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void absSumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void absSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, AbsOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void absSumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void absSumCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void sqrSumMultipassCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 1>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 2:
sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C2<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 2>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 3:
sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C3<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 3>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
case 4:
sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
cudaSafeCall( cudaGetLastError() );
sumPass2Kernel_C4<T, R, threads_x * threads_y><<<1, threads_x * threads_y>>>(
(typename TypeVec<R, 4>::vec_t*)buf.ptr(0), grid.x * grid.y);
cudaSafeCall( cudaGetLastError() );
break;
}
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumMultipassCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sqrSumMultipassCaller<float>(const DevMem2D, PtrStep, double*, int);
template <typename T>
void sqrSumCaller(const DevMem2D src, PtrStep buf, double* sum, int cn)
{
using namespace sums;
typedef typename SumType<T>::R R;
dim3 threads, grid;
estimateThreadCfg(src.cols, src.rows, threads, grid);
setKernelConsts(src.cols, src.rows, threads, grid);
switch (cn)
{
case 1:
sumKernel<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 1>::vec_t*)buf.ptr(0));
break;
case 2:
sumKernel_C2<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 2>::vec_t*)buf.ptr(0));
break;
case 3:
sumKernel_C3<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 3>::vec_t*)buf.ptr(0));
break;
case 4:
sumKernel_C4<T, R, SqrOp<R>, threads_x * threads_y><<<grid, threads>>>(
src, (typename TypeVec<R, 4>::vec_t*)buf.ptr(0));
break;
}
cudaSafeCall( cudaGetLastError() );
cudaSafeCall( cudaDeviceSynchronize() );
R result[4] = {0, 0, 0, 0};
cudaSafeCall(cudaMemcpy(result, buf.ptr(0), sizeof(R) * cn, cudaMemcpyDeviceToHost));
sum[0] = result[0];
sum[1] = result[1];
sum[2] = result[2];
sum[3] = result[3];
}
template void sqrSumCaller<uchar>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<char>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<ushort>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<short>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<int>(const DevMem2D, PtrStep, double*, int);
template void sqrSumCaller<float>(const DevMem2D, PtrStep, double*, int);
}}}
|
b1e70d25eec3936a61a8bb5b735d1d2a108000cf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "datcreater_hip.cuh"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include "hiprand/hiprand_kernel.h"
template<typename T1, typename T2>
constexpr auto imin(T1 a, T2 b) { return a < b ? a: b; }
bool Rect::intersects(Rect& other)
{
if (rb.x + 1 < other.lt.x || lt.x - 1 > other.rb.x || rb.y - 1 > other.lt.y && lt.y + 1 < other.rb.y)
// the rectangles are separated from each other
return false;
return true;
}
__global__ void create_rects(Point d, int nTriang, int nCircl, Pair<thrust::device_vector<Rect>>* conflicts,
int polySize, int* polygon, thrust::device_vector<Rect>* rects)
{
hiprandState_t localState; // per-thread RNG state
hiprandState_t* state = &localState; // hiprand_init needs a pointer to real storage
hiprand_init(unsigned(time(NULL)), threadIdx.x, 0, state);
generateGarbage(d, polySize, polygon, state);
Point start(threadIdx.x * d.x, blockIdx.x * d.y); // top-left corner of the square processed by this thread
Point end = Point(start.x + d.x, start.y + d.y);
for (int i = 0; i < nTriang + nCircl; i++)
{
Point lt, rb;
do
{
lt = { randUniform(start.x + 1, start.x + d.x, state), randUniform(start.y + 1, start.y + d.y, state) };
rb = { abs(randNorm(EXPECTED_VAL, DISPERSION, state)) + lt.x, abs(randNorm(EXPECTED_VAL, DISPERSION, state)) + lt.y };
} while (rb.x - lt.x > MIN_RECT_SIDE || lt.y - rb.y > MIN_RECT_SIDE);
Rect nr(lt, rb);
if (onPolygonEdge(nr, polySize))
rects->push_back(nr);
}
// random rectangles have been created (possibly intersecting ones)
pop_intersecting(*rects); // sort out the internal conflicts
for (auto nr: *rects)
{
bool second = atRight(nr, start, end);
if (second)
// the rectangle may conflict across the bottom/right border
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.push_back(nr);
else
// the rectangle may conflict across the top/left border
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.push_back(nr);
}
}
__global__ void create_objects(int nTriang, int nCircl, Pair<thrust::device_vector<Rect>>* conflicts,
int polySize, int* polygon, thrust::device_vector<Rect>* rects, int* realTriang, int* realCirc)
{
hiprandState_t localState; // per-thread RNG state
hiprandState_t* state = &localState; // hiprand_init needs a pointer to real storage
hiprand_init(unsigned(time(NULL)), threadIdx.x, 0, state);
// resolveConflicts(conflicts); // resolve the collisions on the border
rects->clear();
rects->insert(rects->end(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.begin(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.end());
rects->insert(rects->end(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.begin(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.end());
// now all the rectangles are stored in rects
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.clear();
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.clear();
size_t old = nTriang + nCircl;
float df = static_cast<float>(rects->size()) / old;
// now the rectangles do not intersect
thrust::device_vector<Triangle> triangles;
thrust::device_vector<Circle> circles;
generateTriangles(triangles, nTriang * df, *rects, state);
generateCircles(circles, nCircl * df, *rects);
// the vectors are filled with random triangles and circles
drawTriangles(triangles, polySize, polygon);
drawCircles(circles, polySize, polygon);
*realCirc = circles.size();
*realTriang = triangles.size();
}
__device__ void pop_intersecting(thrust::device_vector<Rect>& v)
{ // removes from the vector every rectangle that intersects one of the earlier ones
for (int i = 0; i < v.size(); i++)
for (int j = i + 1; j < v.size(); j++)
{
Rect r = v[i];
Rect r_n = v[j];
if (r_n.intersects(r))
{
v.erase(v.begin() + j);
j--;
}
}
}
__device__ void resolveConflicts(thrust::device_vector<Rect>& v1, thrust::device_vector<Rect>& v2)
// removes from v2 every rectangle that intersects a rectangle from v1
{
for (auto it = v1.begin(); it != v1.end(); it++)
{
for (auto it2 = v2.begin(); it2 != v2.end(); )
{
Rect r_n = *it2;
Rect rr = *it;
if (rr.intersects(r_n))
it2 = v2.erase(it2);
else
it2++;
}
}
}
__device__ void resolveConflicts(Pair<thrust::device_vector<Rect>>* conf)
// resolve all boundary conflicts between rectangles;
// first -- vectors on the left/top border; second -- on the right/bottom border
// synchronization has been removed -- DO NOT USE THIS ABOMINATION WITHOUT ADDING SYNCHRONIZATION!!!
{
auto v1 = conf[blockIdx.x * gridDim.x + threadIdx.x].second;
for (int i = 0; i < 4; i++)
{
int j;
switch (i)
{
case 0: {
// neighbour to the right
j = blockIdx.x * gridDim.x + threadIdx.x + 1;
break;
}
case 1: {
// neighbour to the bottom-right
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x + 1;
break;
}
case 2: {
// neighbour below
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x;
break;
}
case 3: {
// neighbour to the bottom-left
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x - 1;
break;
}
}
if (j < MAX_CONFLICT)
{
thrust::device_vector<Rect>& v2 = conf[j].first;
resolveConflicts(v2, v1);
}
// syncAll();
}
}
__global__ void resolveConflicts(Pair<thrust::device_vector<Rect>>* conf, int nSide)
{
auto v1 = conf[blockIdx.x * gridDim.x + threadIdx.x].second;
int j;
switch (nSide)
{
case 0: {
// neighbour to the right
j = blockIdx.x * gridDim.x + threadIdx.x + 1;
break;
}
case 1: {
// neighbour to the bottom-right
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x + 1;
break;
}
case 2: {
// neighbour below
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x;
break;
}
case 3: {
// neighbour to the bottom-left
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x - 1;
break;
}
}
if (j < MAX_CONFLICT)
{
thrust::device_vector<Rect>& v2 = conf[j].first;
resolveConflicts(v2, v1);
}
}
__device__ void generateTriangles(thrust::device_vector<Triangle>& triangles, int size, thrust::device_vector<Rect>& rects, hiprandState_t* state)
{
for (int cnt = 0; cnt < size; cnt++)
{
Rect rect = rects.back();
rects.pop_back();
Point p1, p2, p3;
do
{
generatePoint(p1, rect, state);
generatePoint(p2, rect, state);
generatePoint(p3, rect, state);
} while (Perimetr2(p1, p2, p3) < TRING_PERIM
#ifdef ACK_ANGLED
&& iSackAngled(p1, p2, p3)
#endif
);
triangles.push_back(Triangle(p1, p2, p3));
}
}
__device__ void generateCircles(thrust::device_vector<Circle>& circles, int size, thrust::device_vector<Rect>& rects)
{
for (int cnt = 0; cnt < size; cnt++)
{
Rect rect = rects.back();
rects.pop_back();
int r = imin((rect.rb.x - rect.lt.x) / 2, (rect.lt.y - rect.rb.y) / 2);
Point center(r + rect.lt.x, r + rect.rb.y);
circles.push_back(Circle(center, r));
}
}
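// Note: despite the name, this returns the sum of the squared side lengths of the
// triangle (p1, p2, p3); it is only used as a size threshold against TRING_PERIM.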
__device__ float Perimetr2(Point p1, Point p2, Point p3)
{
return (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) +
(p1.x - p3.x) * (p1.x - p3.x) + (p1.y - p3.y) * (p1.y - p3.y) +
(p3.x - p2.x) * (p3.x - p2.x) + (p3.y - p2.y) * (p3.y - p2.y);
}
__device__ void generatePoint(Point& p, Rect& rect, hiprandState_t* state)
{
float r = randUniform(0, 1, state);
int side = randUniform(0, 3.998, state);
if (side == 0 || side == 2)
{
p.y = rect.rb.y + (rect.lt.y - rect.rb.y) * r;
p.x = (side == 0) ? rect.lt.x : rect.rb.x;
}
else
{
p.x = rect.lt.x + (rect.rb.x - rect.lt.x) * r;
p.y = (side == 1) ? rect.lt.y : rect.rb.y;
}
}
__device__ bool onBorder(Rect& r, Point lt, Point rb)
// the rectangle lies close to the border
{
if (r.lt.x <= lt.x || r.lt.y >= lt.y || r.rb.x >= rb.x || r.rb.y <= rb.y)
return true;
return false;
}
__device__ bool onPolygonEdge(Rect& r, int polySize)
{
if (r.lt.x <= 0 || r.lt.y >= polySize || r.rb.x >= polySize || r.rb.y <= 0)
return true;
return false;
}
__device__ bool atLeft(Rect r, Point lt, Point rb)
{
return r.lt.x <= lt.x && r.lt.y > rb.y // the rectangle is to the left (and does not lie entirely in the bottom-left)
|| r.lt.y >= lt.y; // or above
}
__device__ bool atRight(Rect r, Point lt, Point rb)
{
return r.rb.x >= rb.x && r.rb.y < lt.y || // the rectangle is to the right (and does not lie entirely in the top-right)
r.rb.y >= rb.y; // or below
}
/*__device__ void syncAll()
{
using namespace cooperative_groups;
grid_group grid = this_grid();
grid.sync(); // synchronize all threads of all blocks
}*/
__device__ bool notHyp(Point& p1, Point& p2, Point& p3)
{
return (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) +
(p1.x - p3.x) * (p1.x - p3.x) + (p1.y - p3.y) * (p1.y - p3.y) >
(p3.x - p2.x) * (p3.x - p2.x) + (p3.y - p2.y) * (p3.y - p2.y);
}
__device__ bool iSackAngled(Point& p1, Point& p2, Point& p3)
{
return notHyp(p1, p2, p3) && notHyp(p2, p3, p1) && notHyp(p3, p1, p2);
}
__device__ float randUniform(float min, float max, hiprandState_t* state)
{
if (max < min)
throw "MAX < MIN";
//hiprandState_t localState = **state;
float r = min + hiprand_uniform(state) * (max - min);
//**state = localState;
return r;
}
__device__ float randNorm(float ev, float disp, hiprandState_t* state)
{
float r = ev + disp * hiprand_normal(state);
return r;
}
__device__ void drawTriangles(thrust::device_vector<Triangle>& triangles, int polySize, int* polygon)
{
for (Triangle t : triangles)
{
drawLine(t.p1, t.p2, polySize, polygon);
drawLine(t.p3, t.p2, polySize, polygon);
drawLine(t.p1, t.p3, polySize, polygon);
}
}
__device__ void generateGarbage(Point d, int polySize, int* polygon, hiprandState_t* state)
{
for (int i = 0; i <= d.y; i++)
for (int j = 0; j <= d.x; j++)
{
int r;
do
{
r = randUniform(0, _UI8_MAX, state);
} while (r == TRIANG_COLOR || r == CIRCLE_COLOR);
polygon[j + i * polySize] = r;
}
}
__device__ void drawCircles(thrust::device_vector<Circle>& circles, int polySize, int* polygon)
{
// Bresenham's algorithm for drawing circles
// This is possibly even worse than the line drawing
for (Circle c : circles)
{
int x = 0;
int y = c.r;
int delta = 1 - 2 * c.r;
int error = 0;
while (y >= 0)
{
drawPixel(c.center.x + x, c.center.y + y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x + x, c.center.y - y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x - x, c.center.y + y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x - x, c.center.y - y, polySize, polygon, CIRCLE_COLOR);
error = 2 * (delta + y) - 1;
if ((delta < 0) && (error <= 0))
{
delta += 2 * ++x + 1;
continue;
}
if ((delta > 0) && (error > 0))
{
delta -= 2 * --y + 1;
continue;
}
delta += 2 * (++x - --y);
}
}
}
__device__ int _abs(int x)
{
return x > 0 ? x : -x;
}
__device__ void drawPixel(int x, int y, int polySize, int* polygon, int color)
{
polygon[x + y * polySize] = color;
}
__device__ void drawLine(Point p1, Point p2, int polySize, int* polygon)
{
// Bresenham's line algorithm
// it's awful, but what can you do?..
int dx = p2.x - p1.x, dy = p2.y - p1.y, d, x, y, d1, d2;
if (((_abs(dx) > _abs(dy)) && (p2.x < p1.x)) || ((_abs(dx) <= _abs(dy)) && (p2.y < p1.y)))
{
x = p1.x;
p1.x = p2.x;
p2.x = x;
y = p1.y;
p1.y = p2.y;
p2.y = y;
dx = p2.x - p1.x;
dy = p2.y - p1.y;
}
drawPixel(p1.x, p1.y, polySize, polygon, TRIANG_COLOR);
int stp = 1;
if (_abs(dx) > _abs(dy))
{
if (dy < 0)
{
stp = -1;
dy = -dy;
}
d = dy * 2 - dx;
d1 = dy * 2;
d2 = (dy - dx) * 2;
y = p1.y;
for (x = p1.x + 1; x <= p2.x; x++)
{
if (d > 0)
{
y += stp;
d += d2;
}
else
d += d1;
drawPixel(x, y, polySize, polygon, TRIANG_COLOR);
}
}
else
{
if (dx < 0)
{
stp = -1;
dx = -dx;
}
d = (dx * 2) - dy;
d1 = dx * 2;
d2 = (dx - dy) * 2;
x = p1.x;
for (y = p1.y + 1; y <= p2.y; y++)
{
if (d > 0)
{
x += stp;
d += d2;
}
else
d += d1;
drawPixel(x, y, polySize, polygon, TRIANG_COLOR);
}
}
}
|
b1e70d25eec3936a61a8bb5b735d1d2a108000cf.cu
|
#include "datcreater.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/device_vector.h>
#include "curand_kernel.h"
template<typename T1, typename T2>
constexpr auto imin(T1 a, T2 b) { return a < b ? a: b; }
bool Rect::intersects(Rect& other)
{
if (rb.x + 1 < other.lt.x || lt.x - 1 > other.rb.x || rb.y - 1 > other.lt.y && lt.y + 1 < other.rb.y)
// the rectangles are separated from each other
return false;
return true;
}
__global__ void create_rects(Point d, int nTriang, int nCircl, Pair<thrust::device_vector<Rect>>* conflicts,
int polySize, int* polygon, thrust::device_vector<Rect>* rects)
{
curandState localState; // per-thread RNG state
curandState* state = &localState; // curand_init needs a pointer to real storage
curand_init(unsigned(time(NULL)), threadIdx.x, 0, state);
generateGarbage(d, polySize, polygon, state);
Point start(threadIdx.x * d.x, blockIdx.x * d.y); // top-left corner of the square processed by this thread
Point end = Point(start.x + d.x, start.y + d.y);
for (int i = 0; i < nTriang + nCircl; i++)
{
Point lt, rb;
do
{
lt = { randUniform(start.x + 1, start.x + d.x, state), randUniform(start.y + 1, start.y + d.y, state) };
rb = { abs(randNorm(EXPECTED_VAL, DISPERSION, state)) + lt.x, abs(randNorm(EXPECTED_VAL, DISPERSION, state)) + lt.y };
} while (rb.x - lt.x > MIN_RECT_SIDE || lt.y - rb.y > MIN_RECT_SIDE);
Rect nr(lt, rb);
if (onPolygonEdge(nr, polySize))
rects->push_back(nr);
}
// random rectangles have been created (possibly intersecting ones)
pop_intersecting(*rects); // sort out the internal conflicts
for (auto nr: *rects)
{
bool second = atRight(nr, start, end);
if (second)
// the rectangle may conflict across the bottom/right border
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.push_back(nr);
else
// the rectangle may conflict across the top/left border
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.push_back(nr);
}
}
__global__ void create_objects(int nTriang, int nCircl, Pair<thrust::device_vector<Rect>>* conflicts,
int polySize, int* polygon, thrust::device_vector<Rect>* rects, int* realTriang, int* realCirc)
{
curandState localState; // per-thread RNG state
curandState* state = &localState; // curand_init needs a pointer to real storage
curand_init(unsigned(time(NULL)), threadIdx.x, 0, state);
// resolveConflicts(conflicts); // resolve the collisions on the border
rects->clear();
rects->insert(rects->end(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.begin(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.end());
rects->insert(rects->end(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.begin(),
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.end());
// now all the rectangles are stored in rects
conflicts[blockIdx.x * gridDim.x + threadIdx.x].first.clear();
conflicts[blockIdx.x * gridDim.x + threadIdx.x].second.clear();
size_t old = nTriang + nCircl;
float df = static_cast<float>(rects->size()) / old;
// now the rectangles do not intersect
thrust::device_vector<Triangle> triangles;
thrust::device_vector<Circle> circles;
generateTriangles(triangles, nTriang * df, *rects, state);
generateCircles(circles, nCircl * df, *rects);
// the vectors are filled with random triangles and circles
drawTriangles(triangles, polySize, polygon);
drawCircles(circles, polySize, polygon);
*realCirc = circles.size();
*realTriang = triangles.size();
}
__device__ void pop_intersecting(thrust::device_vector<Rect>& v)
{ // removes from the vector every rectangle that intersects one of the earlier ones
for (int i = 0; i < v.size(); i++)
for (int j = i + 1; j < v.size(); j++)
{
Rect r = v[i];
Rect r_n = v[j];
if (r_n.intersects(r))
{
v.erase(v.begin() + j);
j--;
}
}
}
__device__ void resolveConflicts(thrust::device_vector<Rect>& v1, thrust::device_vector<Rect>& v2)
// removes from v2 every rectangle that intersects a rectangle from v1
{
for (auto it = v1.begin(); it != v1.end(); it++)
{
for (auto it2 = v2.begin(); it2 != v2.end(); )
{
Rect r_n = *it2;
Rect rr = *it;
if (rr.intersects(r_n))
it2 = v2.erase(it2);
else
it2++;
}
}
}
__device__ void resolveConflicts(Pair<thrust::device_vector<Rect>>* conf)
// resolve all boundary conflicts between rectangles;
// first -- vectors on the left/top border; second -- on the right/bottom border
// synchronization has been removed -- DO NOT USE THIS ABOMINATION WITHOUT ADDING SYNCHRONIZATION!!!
{
auto v1 = conf[blockIdx.x * gridDim.x + threadIdx.x].second;
for (int i = 0; i < 4; i++)
{
int j;
switch (i)
{
case 0: {
// neighbour to the right
j = blockIdx.x * gridDim.x + threadIdx.x + 1;
break;
}
case 1: {
// neighbour to the bottom-right
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x + 1;
break;
}
case 2: {
// neighbour below
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x;
break;
}
case 3: {
// neighbour to the bottom-left
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x - 1;
break;
}
}
if (j < MAX_CONFLICT)
{
thrust::device_vector<Rect>& v2 = conf[j].first;
resolveConflicts(v2, v1);
}
// syncAll();
}
}
__global__ void resolveConflicts(Pair<thrust::device_vector<Rect>>* conf, int nSide)
{
auto v1 = conf[blockIdx.x * gridDim.x + threadIdx.x].second;
int j;
switch (nSide)
{
case 0: {
// neighbour to the right
j = blockIdx.x * gridDim.x + threadIdx.x + 1;
break;
}
case 1: {
// neighbour to the bottom-right
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x + 1;
break;
}
case 2: {
// neighbour below
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x;
break;
}
case 3: {
// neighbour to the bottom-left
j = (blockIdx.x + 1) * gridDim.x + threadIdx.x - 1;
break;
}
}
if (j < MAX_CONFLICT)
{
thrust::device_vector<Rect>& v2 = conf[j].first;
resolveConflicts(v2, v1);
}
}
__device__ void generateTriangles(thrust::device_vector<Triangle>& triangles, int size, thrust::device_vector<Rect>& rects, curandState* state)
{
for (int cnt = 0; cnt < size; cnt++)
{
Rect rect = rects.back();
rects.pop_back();
Point p1, p2, p3;
do
{
generatePoint(p1, rect, state);
generatePoint(p2, rect, state);
generatePoint(p3, rect, state);
} while (Perimetr2(p1, p2, p3) < TRING_PERIM
#ifdef ACK_ANGLED
&& iSackAngled(p1, p2, p3)
#endif
);
triangles.push_back(Triangle(p1, p2, p3));
}
}
__device__ void generateCircles(thrust::device_vector<Circle>& circles, int size, thrust::device_vector<Rect>& rects)
{
for (int cnt = 0; cnt < size; cnt++)
{
Rect rect = rects.back();
rects.pop_back();
int r = imin((rect.rb.x - rect.lt.x) / 2, (rect.lt.y - rect.rb.y) / 2);
Point center(r + rect.lt.x, r + rect.rb.y);
circles.push_back(Circle(center, r));
}
}
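// Note: despite the name, this returns the sum of the squared side lengths of the
// triangle (p1, p2, p3); it is only used as a size threshold against TRING_PERIM.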
__device__ float Perimetr2(Point p1, Point p2, Point p3)
{
return (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) +
(p1.x - p3.x) * (p1.x - p3.x) + (p1.y - p3.y) * (p1.y - p3.y) +
(p3.x - p2.x) * (p3.x - p2.x) + (p3.y - p2.y) * (p3.y - p2.y);
}
__device__ void generatePoint(Point& p, Rect& rect, curandState* state)
{
float r = randUniform(0, 1, state);
int side = randUniform(0, 3.998, state);
if (side == 0 || side == 2)
{
p.y = rect.rb.y + (rect.lt.y - rect.rb.y) * r;
p.x = (side == 0) ? rect.lt.x : rect.rb.x;
}
else
{
p.x = rect.lt.x + (rect.rb.x - rect.lt.x) * r;
p.y = (side == 1) ? rect.lt.y : rect.rb.y;
}
}
__device__ bool onBorder(Rect& r, Point lt, Point rb)
// the rectangle lies close to the border
{
if (r.lt.x <= lt.x || r.lt.y >= lt.y || r.rb.x >= rb.x || r.rb.y <= rb.y)
return true;
return false;
}
__device__ bool onPolygonEdge(Rect& r, int polySize)
{
if (r.lt.x <= 0 || r.lt.y >= polySize || r.rb.x >= polySize || r.rb.y <= 0)
return true;
return false;
}
__device__ bool atLeft(Rect r, Point lt, Point rb)
{
return r.lt.x <= lt.x && r.lt.y > rb.y // the rectangle is to the left (and does not lie entirely in the bottom-left)
|| r.lt.y >= lt.y; // or above
}
__device__ bool atRight(Rect r, Point lt, Point rb)
{
return r.rb.x >= rb.x && r.rb.y < lt.y || // the rectangle is to the right (and does not lie entirely in the top-right)
r.rb.y >= rb.y; // or below
}
/*__device__ void syncAll()
{
using namespace cooperative_groups;
grid_group grid = this_grid();
grid.sync(); // synchronize all threads of all blocks
}*/
__device__ bool notHyp(Point& p1, Point& p2, Point& p3)
{
return (p1.x - p2.x) * (p1.x - p2.x) + (p1.y - p2.y) * (p1.y - p2.y) +
(p1.x - p3.x) * (p1.x - p3.x) + (p1.y - p3.y) * (p1.y - p3.y) >
(p3.x - p2.x) * (p3.x - p2.x) + (p3.y - p2.y) * (p3.y - p2.y);
}
__device__ bool iSackAngled(Point& p1, Point& p2, Point& p3)
{
return notHyp(p1, p2, p3) && notHyp(p2, p3, p1) && notHyp(p3, p1, p2);
}
__device__ float randUniform(float min, float max, curandState* state)
{
if (max < min)
throw "MAX < MIN";
//curandState localState = **state;
float r = min + curand_uniform(state) * (max - min);
//**state = localState;
return r;
}
__device__ float randNorm(float ev, float disp, curandState* state)
{
float r = ev + disp * curand_normal(state);
return r;
}
__device__ void drawTriangles(thrust::device_vector<Triangle>& triangles, int polySize, int* polygon)
{
for (Triangle t : triangles)
{
drawLine(t.p1, t.p2, polySize, polygon);
drawLine(t.p3, t.p2, polySize, polygon);
drawLine(t.p1, t.p3, polySize, polygon);
}
}
__device__ void generateGarbage(Point d, int polySize, int* polygon, curandState* state)
{
for (int i = 0; i <= d.y; i++)
for (int j = 0; j <= d.x; j++)
{
int r;
do
{
r = randUniform(0, _UI8_MAX, state);
} while (r == TRIANG_COLOR || r == CIRCLE_COLOR);
polygon[j + i * polySize] = r;
}
}
__device__ void drawCircles(thrust::device_vector<Circle>& circles, int polySize, int* polygon)
{
// Bresenham's algorithm for drawing circles
// This is possibly even worse than the line drawing
for (Circle c : circles)
{
int x = 0;
int y = c.r;
int delta = 1 - 2 * c.r;
int error = 0;
while (y >= 0)
{
drawPixel(c.center.x + x, c.center.y + y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x + x, c.center.y - y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x - x, c.center.y + y, polySize, polygon, CIRCLE_COLOR);
drawPixel(c.center.x - x, c.center.y - y, polySize, polygon, CIRCLE_COLOR);
error = 2 * (delta + y) - 1;
if ((delta < 0) && (error <= 0))
{
delta += 2 * ++x + 1;
continue;
}
if ((delta > 0) && (error > 0))
{
delta -= 2 * --y + 1;
continue;
}
delta += 2 * (++x - --y);
}
}
}
__device__ int _abs(int x)
{
return x > 0 ? x : -x;
}
__device__ void drawPixel(int x, int y, int polySize, int* polygon, int color)
{
polygon[x + y * polySize] = color;
}
__device__ void drawLine(Point p1, Point p2, int polySize, int* polygon)
{
// Bresenham's line algorithm
// it's awful, but what can you do?..
int dx = p2.x - p1.x, dy = p2.y - p1.y, d, x, y, d1, d2;
if (((_abs(dx) > _abs(dy)) && (p2.x < p1.x)) || ((_abs(dx) <= _abs(dy)) && (p2.y < p1.y)))
{
x = p1.x;
p1.x = p2.x;
p2.x = x;
y = p1.y;
p1.y = p2.y;
p2.y = y;
dx = p2.x - p1.x;
dy = p2.y - p1.y;
}
drawPixel(p1.x, p1.y, polySize, polygon, TRIANG_COLOR);
int stp = 1;
if (_abs(dx) > _abs(dy))
{
if (dy < 0)
{
stp = -1;
dy = -dy;
}
d = dy * 2 - dx;
d1 = dy * 2;
d2 = (dy - dx) * 2;
y = p1.y;
for (x = p1.x + 1; x <= p2.x; x++)
{
if (d > 0)
{
y += stp;
d += d2;
}
else
d += d1;
drawPixel(x, y, polySize, polygon, TRIANG_COLOR);
}
}
else
{
if (dx < 0)
{
stp = -1;
dx = -dx;
}
d = (dx * 2) - dy;
d1 = dx * 2;
d2 = (dx - dy) * 2;
x = p1.x;
for (y = p1.y + 1; y <= p2.y; y++)
{
if (d > 0)
{
x += stp;
d += d2;
}
else
d += d1;
drawPixel(x, y, polySize, polygon, TRIANG_COLOR);
}
}
}
|
a31a34c383dedf42f9101439b31f6fd9b9b4f1e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
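// grid-stride loop: each thread scales its strided subset of e_x_cov_x by gauss_d2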
for (int i = id; i < valid_voxel_num; i += stride) {
e_x_cov_x[i] *= gauss_d2;
}
}
|
a31a34c383dedf42f9101439b31f6fd9b9b4f1e9.cu
|
#include "includes.h"
__global__ void updateExCovX(double *e_x_cov_x, double gauss_d2, int valid_voxel_num)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
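// grid-stride loop: each thread scales its strided subset of e_x_cov_x by gauss_d2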
for (int i = id; i < valid_voxel_num; i += stride) {
e_x_cov_x[i] *= gauss_d2;
}
}
|
97de4c341b9054b54a39f0f54131dfefb38e77da.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>
// __global__ makes this a GPU kernel (launched from the host, runs on the device)
__global__
void add(int n, float *x, float *y) {
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
}
int main(void){
//Calling hipMallocManaged for unified memory accessible by both the CPU and the GPU
int N = 1<<20; // 1M elements
float *x;
float *y;
hipMallocManaged(&x, N*sizeof(float));
hipMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch the kernel on 1M elements on the GPU (1 block, 1 thread, so it runs serially)
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, N, x, y);
// Wait for GPU to finish
hipDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
hipFree(x);
hipFree(y);
return 0;
}
|
97de4c341b9054b54a39f0f54131dfefb38e77da.cu
|
#include <iostream>
#include <math.h>
// __global__ makes this a GPU kernel (launched from the host, runs on the device)
__global__
void add(int n, float *x, float *y) {
for (int i = 0; i < n; i++) {
y[i] = x[i] + y[i];
}
}
int main(void){
//Calling cudaMallocManaged for unified memory accessible by both the CPU and the GPU
int N = 1<<20; // 1M elements
float *x;
float *y;
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
// Launch the kernel on 1M elements on the GPU (1 block, 1 thread, so it runs serially)
add<<<1,1>>>(N, x, y);
// Wait for GPU to finish
cudaDeviceSynchronize();
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++) {
maxError = fmax(maxError, fabs(y[i]-3.0f));
}
std::cout << "Max error: " << maxError << std::endl;
// Free memory
cudaFree(x);
cudaFree(y);
return 0;
}
|
72fc887ea36389a517ba89fd526febb8b92d88f3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
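// The per-matrix linear system is stored as a packed lower triangle (row r starts at
// r * (r + 1) / 2); AddToMatrices maps a (row, col) entry of the symmetric matrix to
// that packed index, swapping the indices when col > row.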
__forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
float* matrix) {
const int ind = col < row ? (row * (row + 1) >> 1) + col : (col * (col + 1) >> 1) + row;
matrix[ind] += sum;
}
template <int BLOCK_SIZE>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
int matrixOffset,
int matCount,
int partCount,
int histLineSize /* 4 * totalBinFeatureCount */,
float* linearSystem) {
const int matricesPerBlock = BLOCK_SIZE / partCount;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / partCount;
int x = threadIdx.x & (partCount - 1);
const int inBlockOffset = threadIdx.x / partCount;
if (matrixIdx >= matCount)
return;
{
const size_t rowSize = partCount * 2;
const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
linearSystem += matrixIdx * linearSystemSize;
}
pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
__shared__ float lineData[BLOCK_SIZE * 2];
float* row0 = &lineData[inBlockOffset * partCount];
float* row1 = &lineData[inBlockOffset * partCount + BLOCK_SIZE];
float colSum0 = 0.0f;
float colSum1 = 0.0f;
for (int y = 0; y < partCount; ++y) {
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64)partIdx * histLineSize * 4ULL);
const float w00 = (x != y ? __ldg(pairwiseHistogram + offset) : 0.0f);
const float w01 = __ldg(pairwiseHistogram + offset + 1);
const float w10 = __ldg(pairwiseHistogram + offset + 2);
const float w11 = (x != y ? __ldg(pairwiseHistogram + offset + 3) : 0.0f);
row0[x] = w01 + w00;
row1[x] = w10 + w11;
//sync for the row write is done in the reduce if we need it
const float sum0 = FastInBlockReduce(x, row0, partCount);
const float sum1 = FastInBlockReduce(x, row1, partCount);
const int nextRow = 2 * y;
const int nextCol = 2 * x;
if (x == 0) {
AddToMatrices(nextRow, nextRow, sum0, linearSystem);
AddToMatrices(nextRow + 1, nextRow + 1, sum1, linearSystem);
}
colSum0 += w00 + w10;
colSum1 += w01 + w11;
if (x == y) {
AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
} else {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow , nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
__syncthreads();
}
const int nextRow = 2 * x;
linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += colSum0;
linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += colSum1;
}
template <int BLOCK_SIZE>
void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
if (matricesCount > 0) {
const int numBlocks = (((size_t) matricesCount) * partCount + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePairwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, partCount, histLineSize, linearSystem);
}
}
void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2 && (leavesCount <= 64)) {
RunMakeMatrices<192>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
} else {
RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
}
}
template <int BLOCK_SIZE>
__global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matCount,
float* linearSystem) {
const int lineSize = min(rowSize, 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
if (matrixIdx < matCount) {
for (int col = x; col < rowSize; col += 32) {
const int i = col / 2;
ui64 offset = pointwiseHistSize * i;
if (hasPointwiseWeights) {
const float leafWeight = pointwiseHist[offset];
const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
}
const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
targets[col] = sum;
}
}
}
template <int BLOCK_SIZE>
void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream
) {
if (matricesCount > 0) {
const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
const int lineSize = min(32, rowSize);
const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE,0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
}
}
void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2) {
RunMakePointwiseDerivatives<192> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
} else {
RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
}
}
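// For each pair, read the feature value of both members from the compressed index and
// append two bits at position 2 * depth of the pair's bin: one bit per member, set when
// that member goes to the right of the split (or matches the bin for one-hot features).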
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
const ui32* cindex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins) {
ui32 idx = blockIdx.x * blockDim.x + threadIdx.x;
cindex += feature.Offset;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
while (idx < pairCount) {
const uint2 p = pairs[idx];
const ui32 d1 = (cindex[p.x] & mask);
const ui32 d2 = (cindex[p.y] & mask);
ui32 bit1 = feature.OneHotFeature ? d1 == value : d1 > value;
ui32 bit2 = feature.OneHotFeature ? d2 == value : d2 > value;
ui32 bin = bins[idx];
bin = ((bit1 * 2 + bit2) << (depth * 2)) | bin;
bins[idx] = bin;
idx += blockDim.x * gridDim.x;
}
}
void UpdateBinsPairs(TCFeature feature, ui32 bin,
const ui32* compressedIndex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
TArchProps::MaxBlockCount());
hipLaunchKernelGGL(( UpdateBinsPairs), dim3(numBlocks), dim3(blockSize), 0, stream, feature, bin, compressedIndex, pairs, pairCount, depth, bins);
}
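// Block-wide argmax: each thread scans a strided slice of the scores, the per-thread
// (score, index) candidates are reduced in shared memory (preferring the lower index on
// ties), and thread 0 writes the winning split properties.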
template <int BLOCK_SIZE>
__global__ void SelectBestSplitImpl(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
float maxScore = -5000000.0f;
int maxIdx = -1;
int tid = threadIdx.x;
#pragma unroll 8
for (int i = tid; i < size; i += BLOCK_SIZE) {
float score = scores[i];
if (score > maxScore) {
maxScore = score;
maxIdx = i;
}
}
__shared__ float vals[BLOCK_SIZE];
__shared__ int inds[BLOCK_SIZE];
vals[tid] = maxScore;
inds[tid] = maxIdx;
__syncthreads();
for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
if (tid < s) {
if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
vals[tid] = vals[tid + s];
inds[tid] = inds[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
TCBinFeature bestFeature;
const int bestIdx = inds[0];
const float bestScore = vals[0];
if (bestIdx != -1) {
bestFeature = binFeature[bestIdx];
} else {
bestFeature.BinId = 0;
bestFeature.FeatureId = 0;
}
best->Index = bestIndexBias + bestIdx;
best->Score = -bestScore;
best->BinId = bestFeature.BinId;
best->FeatureId = bestFeature.FeatureId;
}
}
void SelectBestSplit(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best,
TCudaStream stream) {
if (size > 0) {
const int blockSize = 1024;
hipLaunchKernelGGL(( SelectBestSplitImpl<blockSize>), dim3(1), dim3(blockSize), 0, stream, scores, binFeature, size, bestIndexBias, best);
}
}
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
if (binx == biny) {
pairWeights[i] = 0;
}
}
}
void ZeroSameLeafBinWeights(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights,
TCudaStream stream
) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( ZeroSameLeafBinWeightsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, pairCount, pairWeights);
}
}
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
void FillPairBins(const uint2* pairs,
const ui32* bins,
ui32 binCount,
ui32 pairCount,
ui32* pairBins,
TCudaStream stream) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( FillPairBinsImpl), dim3(numBlocks), dim3(blockSize), 0, stream, pairs, bins, binCount, pairCount, pairBins);
}
}
//for leaves estimation
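// pairDer2[i] = der2(pair.x) * der2(pair.y) / der2(group of pair.x), guarded so that a
// near-zero group second derivative yields 0 instead of dividing by (almost) zero.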
__global__ void FillPairDer2OnlyImpl(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
}
}
void FillPairDer2Only(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( FillPairDer2OnlyImpl), dim3(numBlocks), dim3(blockSize), 0, stream , ders2, groupDers2, qids, pairs, pairCount, pairDer2);
}
}
}
|
72fc887ea36389a517ba89fd526febb8b92d88f3.cu
|
#include "split_pairwise.cuh"
#include "split_properties_helpers.cuh"
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/instructions.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
namespace NKernel {
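// The per-matrix linear system is stored as a packed lower triangle (row r starts at
// r * (r + 1) / 2); AddToMatrices maps a (row, col) entry of the symmetric matrix to
// that packed index, swapping the indices when col > row.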
__forceinline__ __device__ void AddToMatrices(int row, int col, float sum,
float* matrix) {
const int ind = col < row ? (row * (row + 1) >> 1) + col : (col * (col + 1) >> 1) + row;
matrix[ind] += sum;
}
template <int BLOCK_SIZE>
__global__ void MakePairwiseDerivatives(const float* pairwiseHistogram,
int matrixOffset,
int matCount,
int partCount,
int histLineSize /* 4 * totalBinFeatureCount */,
float* linearSystem) {
const int matricesPerBlock = BLOCK_SIZE / partCount;
int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / partCount;
int x = threadIdx.x & (partCount - 1);
const int inBlockOffset = threadIdx.x / partCount;
if (matrixIdx >= matCount)
return;
{
const size_t rowSize = partCount * 2;
const size_t linearSystemSize = (rowSize + rowSize * (rowSize + 1) / 2);
linearSystem += matrixIdx * linearSystemSize;
}
pairwiseHistogram += (matrixOffset + matrixIdx) * 4;
__shared__ float lineData[BLOCK_SIZE * 2];
float* row0 = &lineData[inBlockOffset * partCount];
float* row1 = &lineData[inBlockOffset * partCount + BLOCK_SIZE];
float colSum0 = 0.0f;
float colSum1 = 0.0f;
for (int y = 0; y < partCount; ++y) {
const int partIdx = ConvertBlockToPart(x, y);
ui64 offset = ((ui64)partIdx * histLineSize * 4ULL);
const float w00 = (x != y ? __ldg(pairwiseHistogram + offset) : 0.0f);
const float w01 = __ldg(pairwiseHistogram + offset + 1);
const float w10 = __ldg(pairwiseHistogram + offset + 2);
const float w11 = (x != y ? __ldg(pairwiseHistogram + offset + 3) : 0.0f);
row0[x] = w01 + w00;
row1[x] = w10 + w11;
//sync for the row write is done in the reduce if we need it
const float sum0 = FastInBlockReduce(x, row0, partCount);
const float sum1 = FastInBlockReduce(x, row1, partCount);
const int nextRow = 2 * y;
const int nextCol = 2 * x;
if (x == 0) {
AddToMatrices(nextRow, nextRow, sum0, linearSystem);
AddToMatrices(nextRow + 1, nextRow + 1, sum1, linearSystem);
}
colSum0 += w00 + w10;
colSum1 += w01 + w11;
if (x == y) {
AddToMatrices(nextRow + 1, nextRow, -(w01 + w10), linearSystem);
} else {
AddToMatrices(nextRow, nextCol, -w00, linearSystem);
AddToMatrices(nextRow , nextCol + 1, -w01, linearSystem);
AddToMatrices(nextRow + 1, nextCol, -w10, linearSystem);
AddToMatrices(nextRow + 1, nextCol + 1, -w11, linearSystem);
}
__syncthreads();
}
const int nextRow = 2 * x;
linearSystem[nextRow * (nextRow + 1) / 2 + nextRow] += colSum0;
linearSystem[(nextRow + 1) * (nextRow + 2) / 2 + nextRow + 1] += colSum1;
}
template <int BLOCK_SIZE>
void RunMakeMatrices(const float* histogram, int partCount, int histLineSize, int firstMatrix, int matricesCount, float* linearSystem, TCudaStream stream) {
if (matricesCount > 0) {
const int numBlocks = (((size_t) matricesCount) * partCount + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePairwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE, 0, stream >> > (histogram, firstMatrix, matricesCount, partCount, histLineSize, linearSystem);
}
}
void MakePairwiseDerivatives(const float* histogram, int leavesCount, int firstMatrix, int matricesCount, int histLineSize, float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2 && (leavesCount <= 64)) {
RunMakeMatrices<192>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
} else {
RunMakeMatrices<256>(histogram, leavesCount, histLineSize, firstMatrix, matricesCount, linearSystem, stream);
}
}
template <int BLOCK_SIZE>
__global__ void MakePointwiseDerivatives(const float* pointwiseHist, ui64 pointwiseHistSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matCount,
float* linearSystem) {
const int lineSize = min(rowSize, 32);
const int matricesPerBlock = BLOCK_SIZE / lineSize;
const int matrixIdx = blockIdx.x * matricesPerBlock + threadIdx.x / lineSize;
pointwiseHist += (firstMatrixIdx + matrixIdx) * (hasPointwiseWeights ? 2 : 1);
linearSystem += ((size_t)matrixIdx) * (rowSize + rowSize * (rowSize + 1) / 2);
const int x = threadIdx.x & (lineSize - 1);
float* targets = linearSystem + rowSize * (rowSize + 1) / 2;
if (matrixIdx < matCount) {
for (int col = x; col < rowSize; col += 32) {
const int i = col / 2;
ui64 offset = pointwiseHistSize * i;
if (hasPointwiseWeights) {
const float leafWeight = pointwiseHist[offset];
const float weight = (col & 1) ? partStats[i].Weight - leafWeight : leafWeight;
linearSystem[col * (col + 1) / 2 + col] += max(weight, 0.0f);
}
const float leafSum = pointwiseHist[offset + hasPointwiseWeights];
const float sum = (col & 1) ? partStats[i].Sum - leafSum : leafSum;
targets[col] = sum;
}
}
}
template <int BLOCK_SIZE>
void RunMakePointwiseDerivatives(const float* pointwiseHist, int binFeatureCount,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream
) {
if (matricesCount > 0) {
const ui32 pointwiseHistSize = binFeatureCount * (hasPointwiseWeights ? 2 : 1);
const int lineSize = min(32, rowSize);
const int numBlocks = (((size_t) matricesCount) * lineSize + BLOCK_SIZE - 1) / BLOCK_SIZE;
MakePointwiseDerivatives<BLOCK_SIZE> << < numBlocks, BLOCK_SIZE,0, stream >> > (pointwiseHist, pointwiseHistSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem);
}
}
void MakePointwiseDerivatives(const float* pointwiseHist, int pointwiseHistLineSize,
const TPartitionStatistics* partStats,
bool hasPointwiseWeights,
int rowSize,
int firstMatrixIdx,
int matricesCount,
float* linearSystem,
TCudaStream stream) {
if (TArchProps::GetMajorVersion() == 2) {
RunMakePointwiseDerivatives<192> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
} else {
RunMakePointwiseDerivatives<128> (pointwiseHist, pointwiseHistLineSize, partStats, hasPointwiseWeights, rowSize, firstMatrixIdx, matricesCount, linearSystem, stream);
}
}
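// For each pair, read the feature value of both members from the compressed index and
// append two bits at position 2 * depth of the pair's bin: one bit per member, set when
// that member goes to the right of the split (or matches the bin for one-hot features).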
__global__ void UpdateBinsPairs(TCFeature feature, ui32 binIdx,
const ui32* cindex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins) {
ui32 idx = blockIdx.x * blockDim.x + threadIdx.x;
cindex += feature.Offset;
const ui32 value = binIdx << feature.Shift;
const ui32 mask = feature.Mask << feature.Shift;
while (idx < pairCount) {
const uint2 p = pairs[idx];
const ui32 d1 = (cindex[p.x] & mask);
const ui32 d2 = (cindex[p.y] & mask);
ui32 bit1 = feature.OneHotFeature ? d1 == value : d1 > value;
ui32 bit2 = feature.OneHotFeature ? d2 == value : d2 > value;
ui32 bin = bins[idx];
bin = ((bit1 * 2 + bit2) << (depth * 2)) | bin;
bins[idx] = bin;
idx += blockDim.x * gridDim.x;
}
}
void UpdateBinsPairs(TCFeature feature, ui32 bin,
const ui32* compressedIndex,
const uint2* pairs,
ui32 pairCount,
ui32 depth,
ui32* bins,
TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = min((pairCount + blockSize - 1) / blockSize,
TArchProps::MaxBlockCount());
UpdateBinsPairs<<<numBlocks, blockSize, 0, stream>>>(feature, bin, compressedIndex, pairs, pairCount, depth, bins);
}
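// Block-wide argmax: each thread scans a strided slice of the scores, the per-thread
// (score, index) candidates are reduced in shared memory (preferring the lower index on
// ties), and thread 0 writes the winning split properties.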
template <int BLOCK_SIZE>
__global__ void SelectBestSplitImpl(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best) {
float maxScore = -5000000.0f;
int maxIdx = -1;
int tid = threadIdx.x;
#pragma unroll 8
for (int i = tid; i < size; i += BLOCK_SIZE) {
float score = scores[i];
if (score > maxScore) {
maxScore = score;
maxIdx = i;
}
}
__shared__ float vals[BLOCK_SIZE];
__shared__ int inds[BLOCK_SIZE];
vals[tid] = maxScore;
inds[tid] = maxIdx;
__syncthreads();
for (int s = BLOCK_SIZE >> 1; s > 0; s >>= 1) {
if (tid < s) {
if ( vals[tid] < vals[tid + s] || (vals[tid] == vals[tid + s] && inds[tid] > inds[tid + s]) ) {
vals[tid] = vals[tid + s];
inds[tid] = inds[tid + s];
}
}
__syncthreads();
}
if (tid == 0) {
TCBinFeature bestFeature;
const int bestIdx = inds[0];
const float bestScore = vals[0];
if (bestIdx != -1) {
bestFeature = binFeature[bestIdx];
} else {
bestFeature.BinId = 0;
bestFeature.FeatureId = 0;
}
best->Index = bestIndexBias + bestIdx;
best->Score = -bestScore;
best->BinId = bestFeature.BinId;
best->FeatureId = bestFeature.FeatureId;
}
}
void SelectBestSplit(const float* scores,
const TCBinFeature* binFeature, int size,
int bestIndexBias, TBestSplitPropertiesWithIndex* best,
TCudaStream stream) {
if (size > 0) {
const int blockSize = 1024;
SelectBestSplitImpl<blockSize><<<1, blockSize, 0, stream>>>(scores, binFeature, size, bestIndexBias, best);
}
}
__global__ void ZeroSameLeafBinWeightsImpl(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
if (binx == biny) {
pairWeights[i] = 0;
}
}
}
void ZeroSameLeafBinWeights(const uint2* pairs,
const ui32* bins,
ui32 pairCount,
float* pairWeights,
TCudaStream stream
) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
ZeroSameLeafBinWeightsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, pairCount, pairWeights);
}
}
__global__ void FillPairBinsImpl(const uint2* pairs,
const ui32* bins,
ui32 rowSize,
ui32 pairCount,
ui32* pairBins) {
const ui32 i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < pairCount) {
uint2 pair = pairs[i];
const ui32 binx = bins[pair.x];
const ui32 biny = bins[pair.y];
pairBins[i] = binx * rowSize + biny;
}
}
void FillPairBins(const uint2* pairs,
const ui32* bins,
ui32 binCount,
ui32 pairCount,
ui32* pairBins,
TCudaStream stream) {
if (pairCount > 0) {
const int blockSize = 256;
const ui32 numBlocks = (pairCount + blockSize - 1) / blockSize;
FillPairBinsImpl<<<numBlocks, blockSize, 0, stream>>>(pairs, bins, binCount, pairCount, pairBins);
}
}
//for leaves estimation
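// pairDer2[i] = der2(pair.x) * der2(pair.y) / der2(group of pair.x), guarded so that a
// near-zero group second derivative yields 0 instead of dividing by (almost) zero.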
__global__ void FillPairDer2OnlyImpl(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2) {
const int tid = threadIdx.x;
const int i = blockIdx.x * blockDim.x + tid;
if (i < pairCount) {
uint2 pair = Ldg(pairs + i);
const float der2x = Ldg(ders2 + pair.x);
const float der2y = Ldg(ders2 + pair.y);
const int qid = Ldg(qids + pair.x);
const float groupDer2 = Ldg(groupDers2 + qid);
pairDer2[i] = groupDer2 > 1e-20f ? der2x * der2y / (groupDer2 + 1e-20f) : 0;
}
}
void FillPairDer2Only(const float* ders2,
const float* groupDers2,
const ui32* qids,
const uint2* pairs,
ui32 pairCount,
float* pairDer2,
TCudaStream stream
) {
const int blockSize = 256;
const int numBlocks = (pairCount + blockSize - 1) / blockSize;
if (numBlocks > 0) {
FillPairDer2OnlyImpl<<< numBlocks, blockSize, 0, stream >>>(ders2, groupDers2, qids, pairs, pairCount, pairDer2);
}
}
}
|
d35ca184b11ee2b0a6267c44aeaae6b1160798c7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2019 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define M 1024*1024
#define threads_block 512
#define MAX_ARR_SIZE 32
#define RUN_SIMPLE 1
#define RUN_THREADS 2
#define RUN_BLOCKS 3
#define RUN_TIMES 4
// Simple kernel: blocks, threads = 1, 1
__global__
void array_add_simple (float *a, float *b, int N)
{
}
// Simple kernel: blocks, threads = 1, 512
__global__
void array_add_threads_only (float *a, float *b, int N)
{
}
// Complex kernel, utilize both blocks and threads
__global__
void array_add_threads_blocks (float *a, float *b, int N)
{
}
// Complex kernel, utilize both blocks and threads
// Add b elements 'times' number of times
__global__
void array_add_times (float *a, float *b, int N, int times)
{
}
/*Initialize the device arrays, timing variables, call kernels
with the right number of threads and blocks
*/
void run_test(int arrsize, int times, int type)
{
float *a_h, *b_h;
float transfer_in, computation_time, transfer_out; // timing values
int N = arrsize*M;
size_t SIZE = N * sizeof(float);
a_h = (float*)malloc(SIZE);
b_h = (float*)malloc(SIZE);
srand(time(NULL));
for(int i = 0; i < N; i++)
{
a_h[i] = (rand() % 10000) / 100.0f;
b_h[i] = (rand() % 10000) / 100.0f;
}
if (type == RUN_SIMPLE)
{
}
else if (type == RUN_THREADS)
{
}
else if (type == RUN_BLOCKS)
{
}
else if (type == RUN_TIMES)
{
}
else
{
printf("Unknown run type\n");
}
// print timing results
printf("%5d %5d %15.2f %15.2f %15.2f\n", times, arrsize, transfer_in,
computation_time, transfer_out);
}
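/* Illustrative sketch (not part of the original skeleton): one way a RUN_*
   branch might allocate device buffers and fill in the three timing values
   with hipEvents. The helper name example_timed_run and its exact structure
   are hypothetical. */
static void example_timed_run(float *a_h, float *b_h, int N,
                              float *transfer_in, float *computation_time,
                              float *transfer_out)
{
    size_t bytes = (size_t)N * sizeof(float);
    float *a_d, *b_d;
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipMalloc((void**)&a_d, bytes);
    hipMalloc((void**)&b_d, bytes);
    // time host-to-device transfers
    hipEventRecord(start, 0);
    hipMemcpy(a_d, a_h, bytes, hipMemcpyHostToDevice);
    hipMemcpy(b_d, b_h, bytes, hipMemcpyHostToDevice);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(transfer_in, start, stop);
    // time the kernel itself
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(array_add_threads_blocks,
                       dim3((N + threads_block - 1) / threads_block),
                       dim3(threads_block), 0, 0, a_d, b_d, N);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(computation_time, start, stop);
    // time the device-to-host transfer of the result
    hipEventRecord(start, 0);
    hipMemcpy(a_h, a_d, bytes, hipMemcpyDeviceToHost);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(transfer_out, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(a_d);
    hipFree(b_d);
}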
int main(int argc, char* argv[]) {
int arrsize = 1;
int i = 0;
// Run with blocks, threads = 1,1
// Number of times is constant(once), array size varies
printf("Times Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_SIMPLE);
}
// Run with several blocks and threads
// Number of times is constant(once), array size varies
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_THREADS);
}
// Run with several blocks and threads
// Number of times is constant(once), array size varies
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_BLOCKS);
}
// Number of times varies, array size is constant (maximum number of elem)
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
int times = 1;
arrsize = MAX_ARR_SIZE;
for(i = 0; i < 10; i++, times*=2)
{
run_test(arrsize, times, RUN_TIMES);
}
}
|
d35ca184b11ee2b0a6267c44aeaae6b1160798c7.cu
|
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2019 Bogdan Simion and Maryam Dehnavi
* -------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define M 1024*1024
#define threads_block 512
#define MAX_ARR_SIZE 32
#define RUN_SIMPLE 1
#define RUN_THREADS 2
#define RUN_BLOCKS 3
#define RUN_TIMES 4
// Simple kernel: blocks, threads = 1, 1
__global__
void array_add_simple (float *a, float *b, int N)
{
}
// Simple kernel: blocks, threads = 1, 512
__global__
void array_add_threads_only (float *a, float *b, int N)
{
}
// Complex kernel, utilize both blocks and threads
__global__
void array_add_threads_blocks (float *a, float *b, int N)
{
}
// Complex kernel, utilize both blocks and threads
// Add b elements 'times' number of times
__global__
void array_add_times (float *a, float *b, int N, int times)
{
}
/*Initialize the device arrays, timing variables, call kernels
with the right number of threads and blocks
*/
void run_test(int arrsize, int times, int type)
{
float *a_h, *b_h;
float transfer_in, computation_time, transfer_out; // timing values
int N = arrsize*M;
size_t SIZE = N * sizeof(float);
a_h = (float*)malloc(SIZE);
b_h = (float*)malloc(SIZE);
srand(time(NULL));
for(int i = 0; i < N; i++)
{
a_h[i] = (rand() % 10000) / 100.0f;
b_h[i] = (rand() % 10000) / 100.0f;
}
if (type == RUN_SIMPLE)
{
}
else if (type == RUN_THREADS)
{
}
else if (type == RUN_BLOCKS)
{
}
else if (type == RUN_TIMES)
{
}
else
{
printf("Unknown run type\n");
}
// print timing results
printf("%5d %5d %15.2f %15.2f %15.2f\n", times, arrsize, transfer_in,
computation_time, transfer_out);
}
int main(int argc, char* argv[]) {
int arrsize = 1;
int i = 0;
// Run with blocks, threads = 1,1
// Number of times is constant(once), array size varies
printf("Times Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_SIMPLE);
}
// Run with several blocks and threads
// Number of times is constant(once), array size varies
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_THREADS);
}
// Run with several blocks and threads
// Number of times is constant(once), array size varies
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
for(arrsize = 1; arrsize <= MAX_ARR_SIZE; i++, arrsize*=2)
{
run_test(arrsize, 1, RUN_BLOCKS);
}
// Number of times varies, array size is constant (maximum number of elem)
printf("\nTimes Size(M) TransferIn(ms) Computation(ms) TransferOut(ms)\n");
int times = 1;
arrsize = MAX_ARR_SIZE;
for(i = 0; i < 10; i++, times*=2)
{
run_test(arrsize, times, RUN_TIMES);
}
}
|
58698523a37e7ae4b60d73da202966f615f77f26.hip
|
// !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include "support.h"
#include "kernel.hip"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int *in_h;
uint8_t* bins_h;
unsigned int *in_d;
uint8_t* bins_d;
unsigned int num_elements, num_bins;
hipError_t cuda_ret;
if(argc == 1) {
num_elements = 1000000;
num_bins = 4096;
} else if(argc == 2) {
num_elements = atoi(argv[1]);
num_bins = 4096;
} else if(argc == 3) {
num_elements = atoi(argv[1]);
num_bins = atoi(argv[2]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./histogram # Input: 1,000,000, Bins: 4,096"
"\n Usage: ./histogram <m> # Input: m, Bins: 4,096"
"\n Usage: ./histogram <m> <n> # Input: m, Bins: n"
"\n");
exit(0);
}
initVector(&in_h, num_elements, num_bins);
bins_h = (uint8_t*) malloc(num_bins*sizeof(uint8_t));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Input size = %u\n Number of bins = %u\n", num_elements,
num_bins);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMalloc((void**)&in_d, num_elements * sizeof(unsigned int));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
cuda_ret = hipMalloc((void**)&bins_d, num_bins * sizeof(uint8_t));
if(cuda_ret != hipSuccess) FATAL("Unable to allocate device memory");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(in_d, in_h, num_elements * sizeof(unsigned int),
hipMemcpyHostToDevice);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to the device");
cuda_ret = hipMemset(bins_d, 0, num_bins * sizeof(uint8_t));
if(cuda_ret != hipSuccess) FATAL("Unable to set device memory");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
histogram(in_d, bins_d, num_elements, num_bins);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ------------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = hipMemcpy(bins_h, bins_d, num_bins * sizeof(uint8_t),
hipMemcpyDeviceToHost);
if(cuda_ret != hipSuccess) FATAL("Unable to copy memory to host");
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(in_h, bins_h, num_elements, num_bins);
// Free memory ------------------------------------------------------------
hipFree(in_d); hipFree(bins_d);
free(in_h); free(bins_h);
return 0;
}
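/* Illustrative sketch (kernel.hip is not included in this listing): a minimal
   privatized shared-memory histogram of the kind the histogram() wrapper above
   typically launches. The kernel name, the unsigned int output array, and the
   launch contract (num_bins * sizeof(unsigned int) bytes of dynamic shared
   memory) are assumptions for this example only; a real implementation would
   still have to clamp each count to 255 before storing it into the uint8_t
   bins array used above. */
__global__ void example_privatized_histogram(const unsigned int *input,
                                             unsigned int *global_bins,
                                             unsigned int num_elements,
                                             unsigned int num_bins)
{
    extern __shared__ unsigned int local_bins[];
    // zero this block's private copy of the histogram
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x)
        local_bins[b] = 0;
    __syncthreads();
    // grid-stride loop over the input, counting into shared memory
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < num_elements; i += gridDim.x * blockDim.x)
        atomicAdd(&local_bins[input[i] % num_bins], 1u);
    __syncthreads();
    // merge the private copy into the global histogram
    for (unsigned int b = threadIdx.x; b < num_bins; b += blockDim.x)
        atomicAdd(&global_bins[b], local_bins[b]);
}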
|
58698523a37e7ae4b60d73da202966f615f77f26.cu
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#include <stdint.h>
#include "support.h"
#include "kernel.cu"
int main(int argc, char* argv[])
{
Timer timer;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
unsigned int *in_h;
uint8_t* bins_h;
unsigned int *in_d;
uint8_t* bins_d;
unsigned int num_elements, num_bins;
cudaError_t cuda_ret;
if(argc == 1) {
num_elements = 1000000;
num_bins = 4096;
} else if(argc == 2) {
num_elements = atoi(argv[1]);
num_bins = 4096;
} else if(argc == 3) {
num_elements = atoi(argv[1]);
num_bins = atoi(argv[2]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./histogram # Input: 1,000,000, Bins: 4,096"
"\n Usage: ./histogram <m> # Input: m, Bins: 4,096"
"\n Usage: ./histogram <m> <n> # Input: m, Bins: n"
"\n");
exit(0);
}
initVector(&in_h, num_elements, num_bins);
bins_h = (uint8_t*) malloc(num_bins*sizeof(uint8_t));
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" Input size = %u\n Number of bins = %u\n", num_elements,
num_bins);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMalloc((void**)&in_d, num_elements * sizeof(unsigned int));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cuda_ret = cudaMalloc((void**)&bins_d, num_bins * sizeof(uint8_t));
if(cuda_ret != cudaSuccess) FATAL("Unable to allocate device memory");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(in_d, in_h, num_elements * sizeof(unsigned int),
cudaMemcpyHostToDevice);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to the device");
cuda_ret = cudaMemset(bins_d, 0, num_bins * sizeof(uint8_t));
if(cuda_ret != cudaSuccess) FATAL("Unable to set device memory");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel ----------------------------------------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
histogram(in_d, bins_d, num_elements, num_bins);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch/execute kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ------------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
cuda_ret = cudaMemcpy(bins_h, bins_d, num_bins * sizeof(uint8_t),
cudaMemcpyDeviceToHost);
if(cuda_ret != cudaSuccess) FATAL("Unable to copy memory to host");
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(in_h, bins_h, num_elements, num_bins);
// Free memory ------------------------------------------------------------
cudaFree(in_d); cudaFree(bins_d);
free(in_h); free(bins_h);
return 0;
}
|
9cd6369e8fa692686ee027c80ea5869bfa84b060.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<time.h>
#include<math.h>
#include<iostream>
#define w 256
#define h 256
#define N w*h
using namespace std;
__global__ void reduce(int*,int*,int*);
int main(void)
{
int* hostA = (int*)malloc(N*sizeof(int));
int* hostB = (int*)malloc(N*sizeof(int));
int* hostMean = (int*)malloc(sizeof(int));
*hostMean = 32767;
int* deviceA; int *deviceB;int*deviceMean;
hipMalloc(&deviceA,sizeof(int)*N);
hipMalloc(&deviceB,sizeof(int)*N);
hipMalloc(&deviceMean,sizeof(int));
//randomly generate array hostA
srand(time(0));
int i;
//initialize host vector by random elements
for(i=0;i<N;i++)
{
hostA[i] = i;
}
hostB[0]=0.0;
hipMemcpy(deviceA,hostA,N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(deviceB,hostB,N*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(deviceMean,hostMean,sizeof(int),hipMemcpyHostToDevice);
dim3 blocksize(256);
dim3 gridsize(N/blocksize.x);
float gpu_elapsed_time;
hipEvent_t gpu_start,gpu_stop;
hipEventCreate(&gpu_start);
hipEventCreate(&gpu_stop);
hipEventRecord(gpu_start,0);
hipLaunchKernelGGL(( reduce), dim3(gridsize),dim3(blocksize), 0, 0, deviceA,deviceB,deviceMean);
hipDeviceSynchronize();
hipMemcpy(hostB,deviceB,sizeof(int),hipMemcpyDeviceToHost);
hipEventRecord(gpu_stop, 0);
hipEventSynchronize(gpu_stop);
hipEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
hipEventDestroy(gpu_start);
hipEventDestroy(gpu_stop);
double std_dev = pow(hostB[0]/(N),0.5);
cout<<"Reduced array standard deviation is = "<<std_dev<<endl;
std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
clock_t cpu_start = clock();
int sum=0;
for(int i=0;i<N;i++){
sum = sum + int(pow((hostA[i] - (*hostMean)),2.0));
}
//cout<<"sum == "<<sum<<endl;
double std_dev_actual = pow(sum/(N),0.5);
printf("Actual value of standard deviation should be: %f \n", std_dev_actual);
clock_t cpu_stop = clock();
clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
hipFree(deviceA);
hipFree(deviceB);
hipFree(deviceMean);
free(hostB);
free(hostA);
free(hostMean);
}
__global__ void reduce(int* input,int* output,int* mean)
{
__shared__ int shared_data[256];
int i = blockIdx.x*blockDim.x+threadIdx.x;
shared_data[threadIdx.x] = int( pow(double(input[i]- *mean),2.0));
__syncthreads();
for(int s=1;s<blockDim.x;s*=2)
{
int index = 2 * s * threadIdx.x;
if (index < blockDim.x)
{
shared_data[index] += shared_data[index + s];
}
__syncthreads();
}
if (threadIdx.x == 0)
atomicAdd(output,shared_data[0]);
}
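// Added illustration (not in the original source): the same per-block reduction
// written with sequential addressing. It computes the identical sum of squared
// deviations, but the active threads read contiguous shared-memory words instead
// of the interleaved pattern above; the kernel name is hypothetical.
__global__ void reduce_sequential(int* input, int* output, int* mean)
{
    __shared__ int shared_data[256];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int d = input[i] - *mean;
    shared_data[threadIdx.x] = d * d;
    __syncthreads();
    // halve the number of active threads each step; assumes blockDim.x is a power of two
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadIdx.x < s)
            shared_data[threadIdx.x] += shared_data[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        atomicAdd(output, shared_data[0]);
}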
|
9cd6369e8fa692686ee027c80ea5869bfa84b060.cu
|
#include<stdio.h>
#include<time.h>
#include<math.h>
#include<iostream>
#define w 256
#define h 256
#define N w*h
using namespace std;
__global__ void reduce(int*,int*,int*);
int main(void)
{
int* hostA = (int*)malloc(N*sizeof(int));
int* hostB = (int*)malloc(N*sizeof(int));
int* hostMean = (int*)malloc(sizeof(int));
*hostMean = 32767;
int* deviceA; int *deviceB;int*deviceMean;
cudaMalloc(&deviceA,sizeof(int)*N);
cudaMalloc(&deviceB,sizeof(int)*N);
cudaMalloc(&deviceMean,sizeof(int));
//randomly generate array hostA
srand(time(0));
int i;
//initialize host vector by random elements
for(i=0;i<N;i++)
{
hostA[i] = i;
}
hostB[0]=0.0;
cudaMemcpy(deviceA,hostA,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,hostB,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(deviceMean,hostMean,sizeof(int),cudaMemcpyHostToDevice);
dim3 blocksize(256);
dim3 gridsize(N/blocksize.x);
float gpu_elapsed_time;
cudaEvent_t gpu_start,gpu_stop;
cudaEventCreate(&gpu_start);
cudaEventCreate(&gpu_stop);
cudaEventRecord(gpu_start,0);
reduce<<<gridsize,blocksize>>>(deviceA,deviceB,deviceMean);
cudaDeviceSynchronize();
cudaMemcpy(hostB,deviceB,sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(gpu_stop, 0);
cudaEventSynchronize(gpu_stop);
cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
cudaEventDestroy(gpu_start);
cudaEventDestroy(gpu_stop);
double std_dev = pow(hostB[0]/(N),0.5);
cout<<"Reduced array standard deviation is = "<<std_dev<<endl;
std::cout<<"The gpu took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl;
clock_t cpu_start = clock();
int sum=0;
for(int i=0;i<N;i++){
sum = sum + int(pow((hostA[i] - (*hostMean)),2.0));
}
//cout<<"sum == "<<sum<<endl;
double std_dev_actual = pow(sum/(N),0.5);
printf("Actual value of standard deviation should be: %f \n", std_dev_actual);
clock_t cpu_stop = clock();
clock_t cpu_elapsed_time = 1000*(cpu_stop - cpu_start)/CLOCKS_PER_SEC;
std::cout<<"The cpu took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl;
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceMean);
free(hostB);
free(hostA);
free(hostMean);
}
__global__ void reduce(int* input,int* output,int* mean)
{
__shared__ int shared_data[256];
int i = blockIdx.x*blockDim.x+threadIdx.x;
shared_data[threadIdx.x] = int( pow(double(input[i]- *mean),2.0));
__syncthreads();
for(int s=1;s<blockDim.x;s*=2)
{
int index = 2 * s * threadIdx.x;
if (index < blockDim.x)
{
shared_data[index] += shared_data[index + s];
}
__syncthreads();
}
if (threadIdx.x == 0)
atomicAdd(output,shared_data[0]);
}
|
01b8ab22b4d711563901c03fe4ec3389cdf502f8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n)
return;
bools[index] = (idata[index] == 0 ? 0 : 1);
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n)
return;
if (bools[index] == 1)
{
odata[indices[index]] = idata[index];
}
}
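// Added worked example (not in the original source): compacting
//   idata   = [0, 3, 0, 7, 5]
//   kernMapToBoolean -> bools   = [0, 1, 0, 1, 1]
//   exclusive scan   -> indices = [0, 0, 1, 1, 2]   (the scan step runs elsewhere)
//   kernScatter      -> odata   = [3, 7, 5]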
}
}
|
01b8ab22b4d711563901c03fe4ec3389cdf502f8.cu
|
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n)
return;
bools[index] = (idata[index] == 0 ? 0 : 1);
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index >= n)
return;
if (bools[index] == 1)
{
odata[indices[index]] = idata[index];
}
}
}
}
|
f95603aef33f60a46852a04c59e30cdd655653c3.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include "internal.hpp"
#include "hip/hip_runtime.h"
namespace pcl
{
namespace device
{
struct InSphere
{
float x_, y_, z_, radius2_;
InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {}
__device__ __host__ __forceinline__ bool operator()(const float3& point) const
{
float dx = point.x - x_;
float dy = point.y - y_;
float dz = point.z - z_;
return (dx * dx + dy * dy + dz * dz) < radius2_;
}
__device__ __host__ __forceinline__ bool operator()(const float4& point) const
{
return (*this)(make_float3(point.x, point.y, point.z));
}
};
}
}
void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer)
{
using PointType = OctreeImpl::PointType;
if (buffer.size() < cloud.size())
buffer.create(cloud.size());
InSphere cond(query.x, query.y, query.z, radius);
thrust::device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr());
thrust::device_ptr<int> res_ptr(buffer.ptr());
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first + cloud.size();
//main bottleneck is kernel call overhead/allocations
//work time for 871k points ~0.8ms
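    // Added note: this is the stencil overload of thrust::copy_if: it walks the
    // index range [0, cloud.size()), tests the corresponding point against
    // InSphere, and copies the indices of the points that pass into the result buffer.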
int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr);
result = DeviceArray<int>(buffer.ptr(), count);
}
|
f95603aef33f60a46852a04c59e30cdd655653c3.cu
|
/*
* Software License Agreement (BSD License)
*
* Copyright (c) 2011, Willow Garage, Inc.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* Author: Anatoly Baskeheev, Itseez Ltd, ([email protected])
*/
#include <thrust/copy.h>
#include <thrust/device_ptr.h>
#include <thrust/iterator/counting_iterator.h>
#include "internal.hpp"
#include "cuda.h"
namespace pcl
{
namespace device
{
struct InSphere
{
float x_, y_, z_, radius2_;
InSphere(float x, float y, float z, float radius) : x_(x), y_(y), z_(z), radius2_(radius * radius) {}
__device__ __host__ __forceinline__ bool operator()(const float3& point) const
{
float dx = point.x - x_;
float dy = point.y - y_;
float dz = point.z - z_;
return (dx * dx + dy * dy + dz * dz) < radius2_;
}
__device__ __host__ __forceinline__ bool operator()(const float4& point) const
{
return (*this)(make_float3(point.x, point.y, point.z));
}
};
}
}
void pcl::device::bruteForceRadiusSearch(const OctreeImpl::PointCloud& cloud, const OctreeImpl::PointType& query, float radius, DeviceArray<int>& result, DeviceArray<int>& buffer)
{
using PointType = OctreeImpl::PointType;
if (buffer.size() < cloud.size())
buffer.create(cloud.size());
InSphere cond(query.x, query.y, query.z, radius);
thrust::device_ptr<const PointType> cloud_ptr((const PointType*)cloud.ptr());
thrust::device_ptr<int> res_ptr(buffer.ptr());
thrust::counting_iterator<int> first(0);
thrust::counting_iterator<int> last = first + cloud.size();
//main bottleneck is kernel call overhead/allocations
//work time for 871k points ~0.8ms
int count = (int)(thrust::copy_if(first, last, cloud_ptr, res_ptr, cond) - res_ptr);
result = DeviceArray<int>(buffer.ptr(), count);
}
|
0d6bb28b82f85eda29678795719640ba9af74b4e.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2019 Australian National University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <hip/hip_runtime.h>
#include <stdio.h>
// this macro checks for errors in CUDA calls
#define Err(ans) \
{ gpucheck((ans), __FILE__, __LINE__); }
inline void gpucheck(hipError_t code, const char *file, int line) {
if (code != hipSuccess) {
fprintf(stderr, "GPU Err: %s %s %d\n", hipGetErrorString(code),
file, line);
exit(code);
}
}
__global__ void hello(char *res, int size) {
char str[] = "Hello World!";
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
res[idx] = str[idx];
}
int main(void) {
char *str_h, *str_d;
const int size = 13;
Err(hipHostMalloc(&str_h, size)); // note we could just use a normal
// malloc although this gives us
// pinned memory
Err(hipMalloc(&str_d, size));
hipLaunchKernelGGL(( hello), dim3(1), dim3(13), 0, 0, str_d, size);
Err(hipMemcpy(str_h, str_d, size, hipMemcpyDeviceToHost));
printf("Result : %s\n", str_h);
hipHostFree(str_h);
hipFree(str_d);
}
|
0d6bb28b82f85eda29678795719640ba9af74b4e.cu
|
/*
* Copyright 2019 Australian National University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <stdio.h>
// this macro checks for errors in CUDA calls
#define Err(ans) \
{ gpucheck((ans), __FILE__, __LINE__); }
inline void gpucheck(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU Err: %s %s %d\n", cudaGetErrorString(code),
file, line);
exit(code);
}
}
__global__ void hello(char *res, int size) {
char str[] = "Hello World!";
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
res[idx] = str[idx];
}
int main(void) {
char *str_h, *str_d;
const int size = 13;
Err(cudaMallocHost(&str_h, size)); // note we could just use a normal
// malloc although this gives us
// pinned memory
Err(cudaMalloc(&str_d, size));
hello<<<1, 13>>>(str_d, size);
Err(cudaMemcpy(str_h, str_d, size, cudaMemcpyDeviceToHost));
printf("Result : %s\n", str_h);
cudaFreeHost(str_h);
cudaFree(str_d);
}
|
bf5e001c2ed3dcd1fb220fd9b24f3e2320b67cba.hip
|
// !!! This is a file automatically generated by hipify!!!
/**
* @file pctdemo_life_mex_shmem.cu
* @brief Example of implementing a stencil operation on the GPU using shared memory.
*
* Copyright 2013 The MathWorks, Inc.
*/
#include <stdint.h>
#include <algorithm>
#include <math.h>
#include <hip/hip_runtime_api.h>
#include "get_shortest_sort_mex.hpp"
#include "mex.h"
/**
* Host function called by MEX gateway. Sets up and calls the device function
* for each generation.
*/
#define CUDART_PI_F 3.141592654f
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
mexPrintf("GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
void complex_mult(const float & a_1, const float & a_2, const float & b_1, const float & b_2, float* c_1, float* c_2) {
*c_1 = a_1 * b_1 - a_2 * b_2;
*c_2 = a_1 * b_2 + a_2 * b_1;
}
__device__
float modulo_float(const float & a, const float & b) {
return fmodf(fmodf(a,b) + b , b);
}
__global__
void dipole_compute_kernel_test(float * const pResidue, int32_t* const pPositive, float * const pOutArray, float * const pOut2Array,
int const dim_out_1, int const dim_out_2, int const dim_out_3, int const dims_Positive, int const direction)
{
}
__global__
void set_to_minus_1(int32_t * const to_init, const int to_init_size)
{
int id_1D = threadIdx.x + blockIdx.x * blockDim.x;
if (id_1D < to_init_size) { // because entries can be set negative once fully used when removing dipoles along another direction
to_init[id_1D] = -1;
}
}
#define GPU_COMPUTE // so the functions in search_universal are adapted for GPU use
#include "search_universal.cpp"
int shortest_sort_KERNEL(int32_t const * const pResidue, int32_t const * const pLookup, int32_t const * const pLookup_z, int32_t const * const pSquareSize, int32_t * const pNearest_res, int residue_number_1D, int precompute_number, int32_t* const size_residue_3D, int lookup_size)
{
int const size_3D_1 = size_residue_3D[0];
int const size_3D_2 = size_residue_3D[1];
int const size_3D_3 = size_residue_3D[2];
int blockSize;
int minGridSize;
int gridSize;
//find the nearest neighbour
int arrayCount = residue_number_1D; // because it iterates over each positive residue
hipOccupancyMaxPotentialBlockSize(&minGridSize,&blockSize,(void*)shortest_compute_kernel,0,arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
shortest_compute_kernel << <gridSize, blockSize >> > (pResidue, pLookup, pLookup_z, pSquareSize, pNearest_res, residue_number_1D, precompute_number, size_3D_1, size_3D_2, size_3D_3, lookup_size);
//gpuErrchk(hipPeekAtLastError());
//gpuErrchk(hipDeviceSynchronize());
//hipDeviceSynchronize();
return 1;
}
|
bf5e001c2ed3dcd1fb220fd9b24f3e2320b67cba.cu
|
/**
* @file pctdemo_life_mex_shmem.cu
* @brief Example of implementing a stencil operation on the GPU using shared memory.
*
* Copyright 2013 The MathWorks, Inc.
*/
#include <stdint.h>
#include <algorithm>
#include <math.h>
#include <cuda_runtime_api.h>
#include "get_shortest_sort_mex.hpp"
#include "mex.h"
/**
* Host function called by MEX gateway. Sets up and calls the device function
* for each generation.
*/
#define CUDART_PI_F 3.141592654f
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
mexPrintf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__device__
void complex_mult(const float & a_1, const float & a_2, const float & b_1, const float & b_2, float* c_1, float* c_2) {
*c_1 = a_1 * b_1 - a_2 * b_2;
*c_2 = a_1 * b_2 + a_2 * b_1;
}
__device__
float modulo_float(const float & a, const float & b) {
return fmodf(fmodf(a,b) + b , b);
}
__global__
void dipole_compute_kernel_test(float * const pResidue, int32_t* const pPositive, float * const pOutArray, float * const pOut2Array,
int const dim_out_1, int const dim_out_2, int const dim_out_3, int const dims_Positive, int const direction)
{
}
__global__
void set_to_minus_1(int32_t * const to_init, const int to_init_size)
{
int id_1D = threadIdx.x + blockIdx.x * blockDim.x;
if (id_1D < to_init_size) { // because entries can be set negative once fully used when removing dipoles along another direction
to_init[id_1D] = -1;
}
}
#define GPU_COMPUTE // so the functions in search_universal are adapted for GPU use
#include "search_universal.cpp"
int shortest_sort_KERNEL(int32_t const * const pResidue, int32_t const * const pLookup, int32_t const * const pLookup_z, int32_t const * const pSquareSize, int32_t * const pNearest_res, int residue_number_1D, int precompute_number, int32_t* const size_residue_3D, int lookup_size)
{
int const size_3D_1 = size_residue_3D[0];
int const size_3D_2 = size_residue_3D[1];
int const size_3D_3 = size_residue_3D[2];
int blockSize;
int minGridSize;
int gridSize;
//find the nearest neighbour
int arrayCount = residue_number_1D; // because it iterates over each positive residue
cudaOccupancyMaxPotentialBlockSize(&minGridSize,&blockSize,(void*)shortest_compute_kernel,0,arrayCount);
// Round up according to array size
gridSize = (arrayCount + blockSize - 1) / blockSize;
shortest_compute_kernel << <gridSize, blockSize >> > (pResidue, pLookup, pLookup_z, pSquareSize, pNearest_res, residue_number_1D, precompute_number, size_3D_1, size_3D_2, size_3D_3, lookup_size);
//gpuErrchk(cudaPeekAtLastError());
//gpuErrchk(cudaDeviceSynchronize());
//cudaDeviceSynchronize();
return 1;
}
|
8e21dba740001602b7cf724f98127b7923eb3653.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "ATen/native/TensorTransformations.h"
#include "ATen/hip/detail/IndexUtils.cuh"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/HIPTensorMethods.cuh"
#include "ATen/hip/HIPTypeConversion.cuh"
#include <cstddef>
#include <vector>
namespace at {
namespace native {
#define AT_APPLY_THREADS_PER_BLOCK 32 * 16
#define AT_APPLY_BLOCKS_PER_SM 4
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
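// Added worked example (not in the original source): for a contiguous 2x3 tensor
// flipped along dim 1 (strides {3, 1}), linear_index 0 resolves to dst_offset 2,
// i.e. out[0][0] = in[0][2], so each row is reversed as expected.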
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntList dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (dims[0] == 0 || dims[0] == total_dims - 1)) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
auto in_tensor_info = cuda::detail::getTensorInfo<cuda_scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<cuda_scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(dims[0]);
out_tensor_info.collapseDims(dims[0]);
hipLaunchKernelGGL(( kernel_pointwise_flip_apply2<cuda_scalar_t, int64_t>)
, dim3(dim_grid), dim3(dim_block), 0, globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims = std::vector<int64_t>(dims);
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = std::vector<int64_t>(in_tensor.sizes());
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = std::vector<int64_t>(in_tensor.strides());
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
auto out_tensor = at::empty_like(in_tensor);
// stride_contiguous is the stride of non-contiguous tensor after called contiguous(), it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
int64_t tmp = N;
for (int64_t i = 0; i < total_dims; i++) {
tmp = tmp / shape[i];
stride_contiguous_d[i] = tmp;
}
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
hipLaunchKernelGGL(( flip_cuda_kernel), dim3(dim_grid), dim3(dim_block), 0, globalContext().getCurrentHIPStreamMasqueradingAsCUDA(),
in_tensor.data<cuda_scalar_t>(), out_tensor.data<cuda_scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
8e21dba740001602b7cf724f98127b7923eb3653.cu
|
#include "ATen/native/TensorTransformations.h"
#include "ATen/cuda/detail/IndexUtils.cuh"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/CUDATensorMethods.cuh"
#include "ATen/cuda/CUDATypeConversion.cuh"
#include <cstddef>
#include <vector>
namespace at {
namespace native {
#define AT_APPLY_THREADS_PER_BLOCK 32 * 16
#define AT_APPLY_BLOCKS_PER_SM 4
template <typename scalar_t, typename IndexType>
#if __CUDA_ARCH__ >= 350
__launch_bounds__(AT_APPLY_THREADS_PER_BLOCK, AT_APPLY_BLOCKS_PER_SM)
#endif
__global__ void
kernel_pointwise_flip_apply2(const cuda::detail::TensorInfo<scalar_t, IndexType> in_tensor_info,
cuda::detail::TensorInfo<scalar_t, IndexType> out_tensor_info,
IndexType N,
int flip_dim,
IndexType total_dims) {
for (IndexType linear_index = blockIdx.x * blockDim.x + threadIdx.x; linear_index < N; linear_index += gridDim.x * blockDim.x) {
IndexType dst_offset = 0;
if (flip_dim == 0) {
// flip 1st dim
dst_offset = (in_tensor_info.sizes[0] - 1 - linear_index / in_tensor_info.strides[0]) * in_tensor_info.strides[0] + linear_index % in_tensor_info.strides[0];
}
else {
// flip last dim
IndexType i = total_dims - 1;
dst_offset = linear_index / in_tensor_info.strides[0] * in_tensor_info.strides[0] + (in_tensor_info.sizes[i] - 1 - linear_index % in_tensor_info.strides[0]);
}
out_tensor_info.data[dst_offset] = in_tensor_info.data[linear_index];
}
}
template <typename scalar_t>
__global__
void flip_cuda_kernel(scalar_t* in_tensor, scalar_t* out_tensor, int64_t N, int64_t* flip_dims, int64_t flip_dims_size, int64_t* strides, int64_t* strides_contiguous, int64_t* shape, int64_t total_dims) {
int64_t linear_index = blockIdx.x * blockDim.x + threadIdx.x;
if (linear_index >= N) {
return;
}
int64_t cur_indices = linear_index, rem = 0, dst_offset = 0;
for (int64_t i = 0; i < total_dims; i++) {
int64_t temp = cur_indices;
cur_indices = cur_indices / strides_contiguous[i];
rem = temp - cur_indices * strides_contiguous[i];
// flip the indices if it is in flip_dims
for (int64_t j = 0; j < flip_dims_size; j++) {
if (i == flip_dims[j]) {
cur_indices = shape[i] - 1 - cur_indices;
}
}
dst_offset += cur_indices * strides[i];
cur_indices = rem;
}
out_tensor[linear_index] = in_tensor[dst_offset];
}
// Flip tensor given a list of dims
Tensor flip_cuda(const Tensor& self, IntList dims) {
auto in_tensor = self;
const int64_t flip_dims_size = dims.size(), total_dims = in_tensor.dim(), N = in_tensor.numel();
check_errors(total_dims, flip_dims_size, dims);
int64_t block_size = 512;
dim3 dim_block(block_size);
dim3 dim_grid((N + block_size - 1) / block_size);
// use kernel_pointwise_flip_apply2 only when to-flip dim is the 1st or last dim, where collapseDims can reduce the amount of work
if (flip_dims_size == 1 && in_tensor.is_contiguous() && (dims[0] == 0 || dims[0] == total_dims - 1)) {
auto out_tensor = at::empty_like(self);
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
auto in_tensor_info = cuda::detail::getTensorInfo<cuda_scalar_t, int64_t>(in_tensor);
auto out_tensor_info = cuda::detail::getTensorInfo<cuda_scalar_t, int64_t>(out_tensor);
int flip_dim = in_tensor_info.collapseDims(dims[0]);
out_tensor_info.collapseDims(dims[0]);
kernel_pointwise_flip_apply2<cuda_scalar_t, int64_t>
<<<dim_grid, dim_block, 0, globalContext().getCurrentCUDAStream()>>>(
in_tensor_info, out_tensor_info, N, flip_dim, total_dims);
});
return out_tensor;
}
auto flip_dims = std::vector<int64_t>(dims);
auto flip_dims_t = at::CPU(kLong).tensorFromBlob(flip_dims.data(), {static_cast<int64_t>(flip_dims.size())});
auto shape = std::vector<int64_t>(in_tensor.sizes());
auto shape_t = at::CPU(kLong).tensorFromBlob(shape.data(), {static_cast<int64_t>(shape.size())});
auto strides = std::vector<int64_t>(in_tensor.strides());
auto strides_t = at::CPU(kLong).tensorFromBlob(strides.data(), {static_cast<int64_t>(strides.size())});
auto out_tensor = at::empty_like(in_tensor);
// stride_contiguous is the stride of non-contiguous tensor after called contiguous(), it is used to compute indices for each element in non-contiguous tensor
Tensor stride_contiguous = at::zeros({total_dims}, kLong);
int64_t* stride_contiguous_d = stride_contiguous.data<int64_t>();
int64_t tmp = N;
for (int64_t i = 0; i < total_dims; i++) {
tmp = tmp / shape[i];
stride_contiguous_d[i] = tmp;
}
AT_DISPATCH_ALL_TYPES_AND_HALF(in_tensor.type(), "flip_cuda", [&] {
using cuda_scalar_t = cuda::into_type<scalar_t>;
flip_cuda_kernel<<<dim_grid, dim_block, 0, globalContext().getCurrentCUDAStream()>>>(
in_tensor.data<cuda_scalar_t>(), out_tensor.data<cuda_scalar_t>(), N, flip_dims_t.toType(CUDA(kLong)).data<int64_t>(), flip_dims_size, strides_t.toType(CUDA(kLong)).data<int64_t>(), stride_contiguous.toType(CUDA(kLong)).data<int64_t>(), shape_t.toType(CUDA(kLong)).data<int64_t>(), total_dims);
});
return out_tensor;
}
}} // namespace at::native
|
1a24c6f7c3d01f1e33e83c0c22ff9a4ab5fe1895.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void add(int *c) {
*c *= 2;
printf("Hello, World!\n");
}
int main(void) {
int c = 2;
int *dev_c;
//hipMalloc()
hipMalloc(&dev_c, sizeof(int));
hipMemcpy(dev_c, &c, sizeof(int), hipMemcpyHostToDevice);
// kernel launch
hipLaunchKernelGGL(( add), dim3(1),dim3(1), 0, 0, dev_c);
//hipMemcpy()
hipMemcpy(&c, dev_c, sizeof(int), hipMemcpyDeviceToHost);
printf("2 * 2 = %d\n", c);
hipFree(dev_c);
return 0;
}
|
1a24c6f7c3d01f1e33e83c0c22ff9a4ab5fe1895.cu
|
#include <stdio.h>
#include <cuda_runtime.h>
__global__ void add(int *c) {
*c *= 2;
printf("Hello, World!\n");
}
int main(void) {
int c = 2;
int *dev_c;
//cudaMalloc()
cudaMalloc(&dev_c, sizeof(int));
cudaMemcpy(dev_c, &c, sizeof(int), cudaMemcpyHostToDevice);
// kernel launch
add<<<1,1>>>(dev_c);
//cudaMemcpy()
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("2 * 2 = %d\n", c);
cudaFree(dev_c);
return 0;
}
|
bb310bc102ec92754b9f847527f8b9f4502439dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
#define BDIMX 32
#define BDIMY 32
#define IPAD 1
__global__ void setRowReadColPad(int * out)
{
__shared__ int tile[BDIMY][BDIMX + IPAD];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
//store to the shared memory
tile[threadIdx.y][threadIdx.x] = idx;
//waiting for all the threads in thread block to reach this point
__syncthreads();
//load from shared memory
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadColDynPad(int * out)
{
extern __shared__ int tile[];
int row_index = threadIdx.y * (blockDim.x+ IPAD) + threadIdx.x;
int col_index = threadIdx.x * (blockDim.x + IPAD) + threadIdx.y;
tile[row_index] = row_index;
__syncthreads();
out[row_index] = tile[col_index];
}
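// Added note: with 32 four-byte shared-memory banks, a plain 32x32 tile places an
// entire column in a single bank, so the column-order reads above would serialize.
// Padding each row by IPAD = 1 (static BDIMX + IPAD, or the (blockDim.x + IPAD)
// row pitch in the dynamic version) shifts successive rows into different banks
// and makes the column reads conflict-free.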
//int main(int argc, char **argv)
//{
// hipSharedMemConfig pConfig;
// hipDeviceGetSharedMemConfig(&pConfig);
// printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
//
// // set up array size 2048
// int nx = BDIMX;
// int ny = BDIMY;
//
// bool iprintf = 0;
//
// if (argc > 1) iprintf = atoi(argv[1]);
//
// size_t nBytes = nx * ny * sizeof(int);
//
// // execution configuration
// dim3 block(BDIMX, BDIMY);
// dim3 grid(1, 1);
// printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
// block.y);
//
// // allocate device memory
// int *d_C;
// hipMalloc((int**)&d_C, nBytes);
// int *gpuRef = (int *)malloc(nBytes);
//
// hipMemset(d_C, 0, nBytes);
// setRowReadColPad << <grid, block >> >(d_C);
// hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
//
// hipMemset(d_C, 0, nBytes);
// setRowReadColDynPad << <grid, block, sizeof(int) * ((nx + IPAD)*ny) >> > (d_C);
// hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost);
//
// // free host and device memory
// hipFree(d_C);
// free(gpuRef);
//
// // reset device
// hipDeviceReset();
// return EXIT_SUCCESS;
//}
|
bb310bc102ec92754b9f847527f8b9f4502439dd.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "common.h"
#define BDIMX 32
#define BDIMY 32
#define IPAD 1
__global__ void setRowReadColPad(int * out)
{
__shared__ int tile[BDIMY][BDIMX + IPAD];
int idx = threadIdx.y * blockDim.x + threadIdx.x;
//store to the shared memory
tile[threadIdx.y][threadIdx.x] = idx;
//waiting for all the threads in thread block to reach this point
__syncthreads();
//load from shared memory
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadColDynPad(int * out)
{
extern __shared__ int tile[];
int row_index = threadIdx.y * (blockDim.x+ IPAD) + threadIdx.x;
int col_index = threadIdx.x * (blockDim.x + IPAD) + threadIdx.y;
tile[row_index] = row_index;
__syncthreads();
out[row_index] = tile[col_index];
}
//int main(int argc, char **argv)
//{
// cudaSharedMemConfig pConfig;
// cudaDeviceGetSharedMemConfig(&pConfig);
// printf("with Bank Mode:%s ", pConfig == 1 ? "4-Byte" : "8-Byte");
//
// // set up array size 2048
// int nx = BDIMX;
// int ny = BDIMY;
//
// bool iprintf = 0;
//
// if (argc > 1) iprintf = atoi(argv[1]);
//
// size_t nBytes = nx * ny * sizeof(int);
//
// // execution configuration
// dim3 block(BDIMX, BDIMY);
// dim3 grid(1, 1);
// printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
// block.y);
//
// // allocate device memory
// int *d_C;
// cudaMalloc((int**)&d_C, nBytes);
// int *gpuRef = (int *)malloc(nBytes);
//
// cudaMemset(d_C, 0, nBytes);
// setRowReadColPad << <grid, block >> >(d_C);
// cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
//
// cudaMemset(d_C, 0, nBytes);
// setRowReadColDynPad << <grid, block, sizeof(int) * ((nx + IPAD)*ny) >> > (d_C);
// cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
//
// // free host and device memory
// cudaFree(d_C);
// free(gpuRef);
//
// // reset device
// cudaDeviceReset();
// return EXIT_SUCCESS;
//}
|
35a509cb210daea7390cf636a06f910458016403.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/dynamic_sample_conv_layer.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype> // n is H * W * h_samples_ * w_samples_ * C
__global__ void object_kernel_conv_im2col_kernel(const int n, const Dtype* data_im,const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int h_samples, const int w_samples,const int kernel_stride,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int c_ind = index % channels;
const int w_s_ind = index / channels % w_samples;
const int h_s_ind = index / channels / w_samples % h_samples;
const int w_ind = index / channels / w_samples / h_samples % width;
const int h_ind = index / channels / w_samples / h_samples / width % height;
const int n_ind = index / channels / w_samples / h_samples / width / height;
const int w_sample_pad = (w_samples - 1) / 2;
const int h_sample_pad = (h_samples - 1) / 2;
const int b_c_ind = c_ind;
const int b_w_ind = w_ind + (w_s_ind - w_samples + w_sample_pad + 1) * kernel_stride;
const int b_h_ind = h_ind + (h_s_ind - h_samples + h_sample_pad + 1) * kernel_stride;
const int top_offset = index * kernel_h * kernel_w;
const int bottom_offset = n_ind * channels * width * height + b_c_ind * width * height + b_h_ind * width + b_w_ind;
const int w_pad = (kernel_w - 1) / 2;
const int h_pad = (kernel_h - 1) / 2;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
data_col[top_offset + i * kernel_w + j] = 0;
if ((b_h_ind + dilation_h * (i - kernel_h + 1 + h_pad ) < height) &&b_h_ind + dilation_h * (i - kernel_h + h_pad + 1) >=0){
if ((b_w_ind + dilation_w * (j - kernel_w + w_pad + 1) < width) &&b_w_ind + dilation_w * (j - kernel_w + w_pad + 1) >=0){
data_col[top_offset + i * kernel_w + j] = data_im[bottom_offset + dilation_h * (i - kernel_h + 1 + h_pad ) * width + dilation_w * (j - kernel_w + 1 + w_pad)];
}
}
}
}
}
}
template <typename Dtype>
void object_kernel_conv_im2col(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int h_samples, const int w_samples,const int kernel_stride,
Dtype* data_col) {
// We are going to launch channels * height_col * width_col kernels, each
// kernel responsible for copying a single-channel grid.
int pad_h = (kernel_h - 1)/2;
int pad_w = (kernel_w - 1)/2;
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) + 1;
int num_kernels = channels * height_col * width_col * h_samples * w_samples;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( object_kernel_conv_im2col_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_im,channels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, h_samples, w_samples,kernel_stride,data_col);
CUDA_POST_KERNEL_CHECK;
}
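// Added note: the column buffer written above is laid out as
// [N][H][W][h_samples][w_samples][C][kernel_h][kernel_w], matching the index
// decomposition inside object_kernel_conv_im2col_kernel (channels vary fastest
// among the first six dimensions, and each entry owns a kernel_h*kernel_w patch).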
template <typename Dtype>
__global__ void object_kernel_conv_col2im_kernel(const int n, const Dtype* data_col,//from HWssCkk to CHW, n is CHW
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int h_samples, const int w_samples,
const int dilation_h, const int dilation_w, const int kernel_stride,Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {// n should be C H W
Dtype val = 0;
const int w_im = index % width ;
const int h_im = (index / width) % height ;
const int c_im = index / (width * height);
const int h_sample_pad = (h_samples + 1)/2;
const int w_sample_pad = (w_samples + 1)/2;
const int h_pad = (kernel_h + 1)/2;
const int w_pad = (kernel_w + 1)/2;
for (int s_h = 0; s_h < h_samples; s_h ++){
for (int s_w = 0; s_w < w_samples; s_w ++){
for (int k_h = 0; k_h < kernel_h; k_h ++){
for (int k_w = 0; k_w < kernel_w; k_w ++){
int val_ind_h = h_im + (s_h - h_samples + h_sample_pad) * kernel_stride + (k_h - kernel_h + h_pad)* dilation_h;
int val_ind_w = w_im + (s_w - w_samples + w_sample_pad) * kernel_stride + (k_w - kernel_w + w_pad)* dilation_w;
if (val_ind_w>=0 && val_ind_w < width && val_ind_h>=0 && val_ind_h < height){
const int col_index = ((((h_im*width + w_im)*h_samples + s_h )*w_samples + s_w )*channels + c_im)* kernel_h*kernel_w + k_h * kernel_w + k_w;
val += data_col[col_index];
}
}
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void object_kernel_conv_col2im(const Dtype* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples,const int w_samples,const int dilation_h, const int dilation_w, const int kernel_stride,Dtype* data_im) {
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( object_kernel_conv_col2im_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_kernels)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
num_kernels, data_col, height, width, channels, kernel_h, kernel_w, h_samples, w_samples, dilation_h, dilation_w, kernel_stride,data_im);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__device__ void gpu_matrix_mult(int nthreads,const Dtype *a,const Dtype *b, Dtype *c, int m, int n, int k,Dtype coeff){
CUDA_KERNEL_LOOP(index,nthreads) {
c[index] *= coeff;
const int b_col = index % k;
const int a_row = index / k;
for (int i = 0 ;i< n; i++){
c[index] +=( a[a_row * n + i]*b[i * k + b_col]);
}
}
}
template <typename Dtype> // n is H * W * h_samples_ * w_samples_ , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_forward(const int n, const Dtype* offset_col_buffer, const Dtype* across_weight, const Dtype* within_weight, const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* top_data) {
//printf("n: %d %d",blockIdx.x * blockDim.x + threadIdx.x ,blockDim.x * gridDim.x);
CUDA_KERNEL_LOOP(index, n) {
const int output_c = index % num_output;
const int w_s_ind = index /num_output% w_samples;
const int h_s_ind = index /num_output/ w_samples % h_samples;
const int w_ind = index /num_output/ w_samples / h_samples % width;
const int h_ind = index / num_output/w_samples / h_samples / width % height;
const int n_ind = index / num_output/w_samples / h_samples / width / height;//printf("n_ind: %d\n", n_ind);
// H * W * h_samples_ * w_samples_ *C*k*k
const int offset_col_buffer_offset = (int)(index/ num_output)* channels * kernel_h * kernel_w;
//const int weight_offset = (n_ind * width * height + h_ind * width + w_ind) * num_output * channels*kernel_h * kernel_w;
const int across_weight_offset = (n_ind * width * height + h_ind * width + w_ind) * num_output * channels;
const int within_weight_offset = (n_ind * width * height + h_ind * width + w_ind) *num_output * kernel_h * kernel_w;
const int top_data_offset = (((n_ind * width * height + h_ind * width + w_ind)*h_samples + h_s_ind )* w_samples + w_s_ind)* num_output ;
// gpu_matrix_mult<Dtype>(num_output,weight + weight_offset/*C'*C*h*w*/, offset_col_buffer + offset_col_buffer_offset/*C*h*w*H'*W'*/, top_data + top_data_offset, num_output /*C'*/, channels * kernel_h * kernel_w/*C*h*w*/,1, (Dtype)0.); // C' * H' * W'
// int nthreads =num_output;
Dtype * c = top_data + top_data_offset;
Dtype coeff = 0.0;
const Dtype* a_across = across_weight + across_weight_offset;
const Dtype* a_within = within_weight + within_weight_offset;
const Dtype* b = offset_col_buffer + offset_col_buffer_offset;
top_data[index] *= coeff;
for (int i = 0 ; i< channels * kernel_h * kernel_w ; i++){
const int k_w = i%kernel_w;
const int k_h = i/kernel_w%kernel_h;
const int k_c = i/kernel_w/kernel_h;
const Dtype w_across_val = (k_w == int((kernel_w-1)/2)&&k_h == int((kernel_h-1)/2))?a_across[output_c*channels + k_c]:Dtype(0.0);
const Dtype w_within_val = a_within[output_c*kernel_h * kernel_w + k_h*kernel_w + k_w];
top_data[index] +=(( w_across_val+w_within_val)*b[i]);
//cd if (index == n -5)printf("hello world\n");
//printf("index: %d\n",index);
}
}
}
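// In effect, each output location (n, h, w, s_h, s_w) applies its own filter that is the
// sum of two parts: the "within" weight, a kernel_h x kernel_w spatial stencil shared by
// all input channels, and the "across" weight, a per-channel 1x1 term that only fires at
// the kernel centre ((kernel_h-1)/2, (kernel_w-1)/2).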
template <typename Dtype>
void DynamicSampleConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_across_weights = bottom[1]->gpu_data();// N,W,H,C'*C
const Dtype* bottom_within_weights = bottom[2]->gpu_data();// N,W,H,C'*s*s
Dtype* top_data = top[0]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
const Dtype* across_weights = bottom_across_weights + n * bottom[1]->count(1);
const Dtype* within_weights = bottom_within_weights + n * bottom[2]->count(1);
Dtype* output = top_data + n * top[0]->count(1);
//printf("begin im2col\n");
object_kernel_conv_im2col(input, channels_,height_, width_, kernel_size_, kernel_size_, dilation_, dilation_, samples_, samples_, kernel_stride_,offset_col_buffer_.mutable_gpu_data());
//printf("im2col done\n");printf("begin forward %d\n", height_ * width_ * samples_ * samples_*channels_);
hipLaunchKernelGGL(( object_forward<Dtype>), dim3(CAFFE_GET_BLOCKS(height_ * width_ * samples_ * samples_*num_output_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, height_ * width_ * samples_ * samples_*num_output_, offset_col_buffer_.gpu_data(), across_weights,within_weights, channels_,num_output_,height_,width_,kernel_size_,kernel_size_,samples_,samples_,output);//N,H,W,9*C'
//printf("forward done\n");
}
}
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H * W *C'C , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_across_weight(const int n, const Dtype* offset_col_buffer, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* across_weight_diff, Dtype* within_weight_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int c = index % channels;
const int output_c = index/channels%num_output;
const int w_ind = index /channels/num_output% width;
const int h_ind = index /channels/num_output/ width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
for(int s_h = 0; s_h < h_samples;s_h ++){
for(int s_w = 0; s_w < w_samples; s_w++){
int buffer_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)* channels*kernel_h*kernel_w;
int top_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)*num_output;
//int weight_offset = index*num_output*channels*kernel_h*kernel_w;
const int across_weight_offset = (h_ind* width + w_ind) * num_output * channels;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
//gpu_matrix_mult<Dtype>( num_output*kernel_dim_, top_diff + top_offset ,offset_col_buffer + buffer_offset , weight_diff + weight_offset, num_output /* C' */, 1,kernel_dim_ /* C * h * w */ ,(Dtype)1.);
Dtype * c_across = across_weight_diff + across_weight_offset;
Dtype coeff = 1.0;
const Dtype* a = top_diff + top_offset;
const Dtype* b = offset_col_buffer + buffer_offset;
c_across[output_c*channels+c] += a[output_c]*b[c * kernel_h * kernel_w + ((kernel_h -1)/2)*kernel_w + (kernel_w-1)/2];
}
}
}
}
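// Gradient w.r.t. the "across" weight: because the forward pass applies it only at the
// kernel centre, the backward pass reads just the centre tap of each column patch,
// b[c*kernel_h*kernel_w + ((kernel_h-1)/2)*kernel_w + (kernel_w-1)/2].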
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H * W C'kk , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_within_weight(const int n, const Dtype* offset_col_buffer, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* across_weight_diff, Dtype* within_weight_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int k_w = index%kernel_w;
const int k_h = index/ kernel_w %kernel_h;
const int output_c = index/ kernel_w /kernel_h%num_output;
const int w_ind = index/ kernel_w /kernel_h/num_output % width;
const int h_ind = index/ kernel_w /kernel_h/num_output / width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
for(int s_h = 0; s_h < h_samples;s_h ++){
for(int s_w = 0; s_w < w_samples; s_w++){
int buffer_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)* channels*kernel_h*kernel_w;
int top_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)*num_output;
//int weight_offset = index*num_output*channels*kernel_h*kernel_w;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
//gpu_matrix_mult<Dtype>( num_output*kernel_dim_, top_diff + top_offset ,offset_col_buffer + buffer_offset , weight_diff + weight_offset, num_output /* C' */, 1,kernel_dim_ /* C * h * w */ ,(Dtype)1.);
Dtype * c_within = within_weight_diff + within_weight_offset;
Dtype coeff = 1.0;
const Dtype* a = top_diff + top_offset;
const Dtype* b = offset_col_buffer + buffer_offset;
for(int ind =0 ;ind<channels;ind++){
c_within[output_c* kernel_h*kernel_w + k_h * kernel_w + k_w] += a[output_c]*b[ind * kernel_h * kernel_w + k_h*kernel_w + k_w];
}
}
}
}
}
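// Gradient w.r.t. the "within" weight: the forward pass applies the same spatial tap to
// every input channel, so the backward pass sums the column values over all channels
// before accumulating into c_within.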
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H W ssCkk , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_data(const int n, const Dtype* across_weight,const Dtype* within_weight, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* offset_col_buffer) {
CUDA_KERNEL_LOOP(index, n) {
const int k_w = index%kernel_w;
const int k_h = index/kernel_w%kernel_h;
const int k_c = index/kernel_w/kernel_h%channels;
const int s_w = index/kernel_w/kernel_h/channels%w_samples;
const int s_h = index/kernel_w/kernel_h/channels/w_samples%h_samples;
const int w_ind = index/kernel_w/kernel_h/channels/w_samples/h_samples % width;
const int h_ind = index/kernel_w/kernel_h/channels/w_samples/h_samples/ width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
int buffer_offset = index;
int top_offset = (int)(index/kernel_dim_)*num_output;
const int across_weight_offset = (h_ind* width + w_ind) * num_output * channels;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
const Dtype* a = top_diff + top_offset;
const Dtype* b_across = across_weight + across_weight_offset;
const Dtype* b_within = within_weight + within_weight_offset;
offset_col_buffer[index] =0;
for(int ind =0 ;ind<num_output ;ind++){
const Dtype w_across_val = (k_w == int((kernel_w-1)/2)&&k_h == int((kernel_h-1)/2))?b_across[ind*channels + k_c]:Dtype(0.0);
const Dtype w_within_val = b_within[ind*kernel_h * kernel_w + k_h*kernel_w + k_w];
offset_col_buffer[index] += a[ind]*(w_across_val+w_within_val);
}
}
}
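// Gradient w.r.t. the input: this kernel produces the gradient of the column buffer
// (reusing offset_col_buffer as scratch); Backward_gpu below folds it back into
// bottom_diff via object_kernel_conv_col2im.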
template <typename Dtype>
void DynamicSampleConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_across_weights = bottom[1]->gpu_data();// N,W,H,C'*C
const Dtype* bottom_within_weights = bottom[2]->gpu_data();// N,W,H,C'*s*s
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_across_weight_diff = bottom[1]->mutable_gpu_diff();
Dtype* bottom_within_weight_diff = bottom[2]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff);
caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom_across_weight_diff );
caffe_gpu_set(bottom[2]->count(), Dtype(0.0), bottom_within_weight_diff);
  //printf("begin Backward\n");
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
//const Dtype* weights = bottom_weights + n * bottom[1]->count(1);
const Dtype* across_weights = bottom_across_weights + n * bottom[1]->count(1);
const Dtype* within_weights = bottom_within_weights + n * bottom[2]->count(1);
const Dtype* diff = top_diff + n * top[0]->count(1);
const Dtype* input = bottom_data + n * this->bottom_dim_;
Dtype* across_weights_diff = bottom_across_weight_diff + n * bottom[1]->count(1);
Dtype* within_weights_diff = bottom_within_weight_diff + n * bottom[2]->count(1);
//printf("begin im2col\n");
object_kernel_conv_im2col(input, channels_,height_, width_, kernel_size_, kernel_size_, dilation_, dilation_, samples_, samples_, kernel_stride_,offset_col_buffer_.mutable_gpu_data());
//printf("im2col done\n");
const Dtype* offset_col_buff = offset_col_buffer_.gpu_data();
// printf("begin back to across\n");
hipLaunchKernelGGL(( object_kernel_backward_across_weight), dim3(CAFFE_GET_BLOCKS(height_ * width_ )), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, height_*width_ , offset_col_buff, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, across_weights_diff,within_weights_diff);
// printf("back to across done, begin back to within\n");
hipLaunchKernelGGL(( object_kernel_backward_within_weight), dim3(CAFFE_GET_BLOCKS(height_ * width_ )), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, height_*width_ , offset_col_buff, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, across_weights_diff,within_weights_diff);
// printf("back to within done\n");
// gradient w.r.t. bottom data, if necessary.
Dtype * att_col_diff_buff = offset_col_buffer_.mutable_gpu_data();
// printf("begin back to col\n");
hipLaunchKernelGGL(( object_kernel_backward_data), dim3(CAFFE_GET_BLOCKS(height_ * width_ )), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, height_*width_ ,across_weights,within_weights, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, att_col_diff_buff);
// printf("back to col done, begin col2im\n");
object_kernel_conv_col2im(offset_col_buffer_.gpu_data(), channels_, height_, width_, kernel_size_, kernel_size_, samples_,samples_, dilation_, dilation_,kernel_stride_, bottom_diff + n*bottom[0]->count(1));
// printf(" col2im done\n");
}
caffe_gpu_scal(bottom[0]->count(),Dtype(lr_mul),bottom_diff);
//caffe_gpu_scal(bottom[1]->count(),Dtype(lr_mul),bottom_weight_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(DynamicSampleConvolutionLayer);
} // namespace caffe
|
35a509cb210daea7390cf636a06f910458016403.cu
|
#include <vector>
#include "caffe/layers/dynamic_sample_conv_layer.hpp"
#include "caffe/util/im2col.hpp"
namespace caffe {
template <typename Dtype> // n is H * W * h_samples_ * w_samples_ * C
__global__ void object_kernel_conv_im2col_kernel(const int n, const Dtype* data_im,const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int h_samples, const int w_samples,const int kernel_stride,
Dtype* data_col) {
CUDA_KERNEL_LOOP(index, n) {
const int c_ind = index % channels;
const int w_s_ind = index / channels % w_samples;
const int h_s_ind = index / channels / w_samples % h_samples;
const int w_ind = index / channels / w_samples / h_samples % width;
const int h_ind = index / channels / w_samples / h_samples / width % height;
const int n_ind = index / channels / w_samples / h_samples / width / height;
const int w_sample_pad = (w_samples - 1) / 2;
const int h_sample_pad = (h_samples - 1) / 2;
const int b_c_ind = c_ind;
const int b_w_ind = w_ind + (w_s_ind - w_samples + w_sample_pad + 1) * kernel_stride;
const int b_h_ind = h_ind + (h_s_ind - h_samples + h_sample_pad + 1) * kernel_stride;
const int top_offset = index * kernel_h * kernel_w;
const int bottom_offset = n_ind * channels * width * height + b_c_ind * width * height + b_h_ind * width + b_w_ind;
const int w_pad = (kernel_w - 1) / 2;
const int h_pad = (kernel_h - 1) / 2;
for (int i = 0; i < kernel_h; ++i) {
for (int j = 0; j < kernel_w; ++j) {
data_col[top_offset + i * kernel_w + j] = 0;
if ((b_h_ind + dilation_h * (i - kernel_h + 1 + h_pad ) < height) &&b_h_ind + dilation_h * (i - kernel_h + h_pad + 1) >=0){
if ((b_w_ind + dilation_w * (j - kernel_w + w_pad + 1) < width) &&b_w_ind + dilation_w * (j - kernel_w + w_pad + 1) >=0){
data_col[top_offset + i * kernel_w + j] = data_im[bottom_offset + dilation_h * (i - kernel_h + 1 + h_pad ) * width + dilation_w * (j - kernel_w + 1 + w_pad)];
}
}
}
}
}
}
template <typename Dtype>
void object_kernel_conv_im2col(const Dtype* data_im, const int channels,
const int height, const int width, const int kernel_h, const int kernel_w,
const int dilation_h, const int dilation_w,
const int h_samples, const int w_samples,const int kernel_stride,
Dtype* data_col) {
  // We launch channels * height_col * width_col * h_samples * w_samples threads;
  // each thread copies one kernel_h x kernel_w patch for a single sampling position.
int pad_h = (kernel_h - 1)/2;
int pad_w = (kernel_w - 1)/2;
int height_col = (height + 2 * pad_h -
(dilation_h * (kernel_h - 1) + 1)) + 1;
int width_col = (width + 2 * pad_w -
(dilation_w * (kernel_w - 1) + 1)) + 1;
int num_kernels = channels * height_col * width_col * h_samples * w_samples;
// NOLINT_NEXT_LINE(whitespace/operators)
object_kernel_conv_im2col_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_im,channels, height, width, kernel_h, kernel_w, dilation_h, dilation_w, h_samples, w_samples,kernel_stride,data_col);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void object_kernel_conv_col2im_kernel(const int n, const Dtype* data_col,//from HWssCkk to CHW, n is CHW
const int height, const int width, const int channels,
const int kernel_h, const int kernel_w,
const int h_samples, const int w_samples,
const int dilation_h, const int dilation_w, const int kernel_stride,Dtype* data_im) {
CUDA_KERNEL_LOOP(index, n) {// n should be C H W
Dtype val = 0;
const int w_im = index % width ;
const int h_im = (index / width) % height ;
const int c_im = index / (width * height);
const int h_sample_pad = (h_samples + 1)/2;
const int w_sample_pad = (w_samples + 1)/2;
const int h_pad = (kernel_h + 1)/2;
const int w_pad = (kernel_w + 1)/2;
for (int s_h = 0; s_h < h_samples; s_h ++){
for (int s_w = 0; s_w < w_samples; s_w ++){
for (int k_h = 0; k_h < kernel_h; k_h ++){
for (int k_w = 0; k_w < kernel_w; k_w ++){
int val_ind_h = h_im + (s_h - h_samples + h_sample_pad) * kernel_stride + (k_h - kernel_h + h_pad)* dilation_h;
int val_ind_w = w_im + (s_w - w_samples + w_sample_pad) * kernel_stride + (k_w - kernel_w + w_pad)* dilation_w;
if (val_ind_w>=0 && val_ind_w < width && val_ind_h>=0 && val_ind_h < height){
const int col_index = ((((h_im*width + w_im)*h_samples + s_h )*w_samples + s_w )*channels + c_im)* kernel_h*kernel_w + k_h * kernel_w + k_w;
val += data_col[col_index];
}
}
}
}
}
data_im[index] = val;
}
}
template <typename Dtype>
void object_kernel_conv_col2im(const Dtype* data_col, const int channels, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples,const int w_samples,const int dilation_h, const int dilation_w, const int kernel_stride,Dtype* data_im) {
int num_kernels = channels * height * width;
// To avoid involving atomic operations, we will launch one kernel per
// bottom dimension, and then in the kernel add up the top dimensions.
// NOLINT_NEXT_LINE(whitespace/operators)
object_kernel_conv_col2im_kernel<Dtype><<<CAFFE_GET_BLOCKS(num_kernels),
CAFFE_CUDA_NUM_THREADS>>>(
num_kernels, data_col, height, width, channels, kernel_h, kernel_w, h_samples, w_samples, dilation_h, dilation_w, kernel_stride,data_im);
CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__device__ void gpu_matrix_mult(int nthreads,const Dtype *a,const Dtype *b, Dtype *c, int m, int n, int k,Dtype coeff){
CUDA_KERNEL_LOOP(index,nthreads) {
c[index] *= coeff;
const int b_col = index % k;
const int a_row = index / k;
for (int i = 0 ;i< n; i++){
c[index] +=( a[a_row * n + i]*b[i * k + b_col]);
}
}
}
template <typename Dtype> // n is H * W * h_samples_ * w_samples_ * num_output_ ; across_weight is N,H,W,C'*C, within_weight is N,H,W,C'*k*k (after permutation), top_data is N,H,W,h_samples_*w_samples_*C'
__global__ void object_forward(const int n, const Dtype* offset_col_buffer, const Dtype* across_weight, const Dtype* within_weight, const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* top_data) {
//printf("n: %d %d",blockIdx.x * blockDim.x + threadIdx.x ,blockDim.x * gridDim.x);
CUDA_KERNEL_LOOP(index, n) {
const int output_c = index % num_output;
const int w_s_ind = index /num_output% w_samples;
const int h_s_ind = index /num_output/ w_samples % h_samples;
const int w_ind = index /num_output/ w_samples / h_samples % width;
const int h_ind = index / num_output/w_samples / h_samples / width % height;
const int n_ind = index / num_output/w_samples / h_samples / width / height;//printf("n_ind: %d\n", n_ind);
// H * W * h_samples_ * w_samples_ *C*k*k
const int offset_col_buffer_offset = (int)(index/ num_output)* channels * kernel_h * kernel_w;
//const int weight_offset = (n_ind * width * height + h_ind * width + w_ind) * num_output * channels*kernel_h * kernel_w;
const int across_weight_offset = (n_ind * width * height + h_ind * width + w_ind) * num_output * channels;
const int within_weight_offset = (n_ind * width * height + h_ind * width + w_ind) *num_output * kernel_h * kernel_w;
const int top_data_offset = (((n_ind * width * height + h_ind * width + w_ind)*h_samples + h_s_ind )* w_samples + w_s_ind)* num_output ;
// gpu_matrix_mult<Dtype>(num_output,weight + weight_offset/*C'*C*h*w*/, offset_col_buffer + offset_col_buffer_offset/*C*h*w*H'*W'*/, top_data + top_data_offset, num_output /*C'*/, channels * kernel_h * kernel_w/*C*h*w*/,1, (Dtype)0.); // C' * H' * W'
// int nthreads =num_output;
Dtype * c = top_data + top_data_offset;
Dtype coeff = 0.0;
const Dtype* a_across = across_weight + across_weight_offset;
const Dtype* a_within = within_weight + within_weight_offset;
const Dtype* b = offset_col_buffer + offset_col_buffer_offset;
top_data[index] *= coeff;
for (int i = 0 ; i< channels * kernel_h * kernel_w ; i++){
const int k_w = i%kernel_w;
const int k_h = i/kernel_w%kernel_h;
const int k_c = i/kernel_w/kernel_h;
const Dtype w_across_val = (k_w == int((kernel_w-1)/2)&&k_h == int((kernel_h-1)/2))?a_across[output_c*channels + k_c]:Dtype(0.0);
const Dtype w_within_val = a_within[output_c*kernel_h * kernel_w + k_h*kernel_w + k_w];
top_data[index] +=(( w_across_val+w_within_val)*b[i]);
//cd if (index == n -5)printf("hello world\n");
//printf("index: %d\n",index);
}
}
}
template <typename Dtype>
void DynamicSampleConvolutionLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_across_weights = bottom[1]->gpu_data();// N,W,H,C'*C
const Dtype* bottom_within_weights = bottom[2]->gpu_data();// N,W,H,C'*s*s
Dtype* top_data = top[0]->mutable_gpu_data();
for (int n = 0; n < this->num_; ++n) {
const Dtype* input = bottom_data + n * this->bottom_dim_;
const Dtype* across_weights = bottom_across_weights + n * bottom[1]->count(1);
const Dtype* within_weights = bottom_within_weights + n * bottom[2]->count(1);
Dtype* output = top_data + n * top[0]->count(1);
//printf("begin im2col\n");
object_kernel_conv_im2col(input, channels_,height_, width_, kernel_size_, kernel_size_, dilation_, dilation_, samples_, samples_, kernel_stride_,offset_col_buffer_.mutable_gpu_data());
//printf("im2col done\n");printf("begin forward %d\n", height_ * width_ * samples_ * samples_*channels_);
object_forward<Dtype><<<CAFFE_GET_BLOCKS(height_ * width_ * samples_ * samples_*num_output_), CAFFE_CUDA_NUM_THREADS>>>(height_ * width_ * samples_ * samples_*num_output_, offset_col_buffer_.gpu_data(), across_weights,within_weights, channels_,num_output_,height_,width_,kernel_size_,kernel_size_,samples_,samples_,output);//N,H,W,9*C'
//printf("forward done\n");
}
}
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H * W *C'C , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_across_weight(const int n, const Dtype* offset_col_buffer, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* across_weight_diff, Dtype* within_weight_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int c = index % channels;
const int output_c = index/channels%num_output;
const int w_ind = index /channels/num_output% width;
const int h_ind = index /channels/num_output/ width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
for(int s_h = 0; s_h < h_samples;s_h ++){
for(int s_w = 0; s_w < w_samples; s_w++){
int buffer_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)* channels*kernel_h*kernel_w;
int top_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)*num_output;
//int weight_offset = index*num_output*channels*kernel_h*kernel_w;
const int across_weight_offset = (h_ind* width + w_ind) * num_output * channels;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
//gpu_matrix_mult<Dtype>( num_output*kernel_dim_, top_diff + top_offset ,offset_col_buffer + buffer_offset , weight_diff + weight_offset, num_output /* C' */, 1,kernel_dim_ /* C * h * w */ ,(Dtype)1.);
Dtype * c_across = across_weight_diff + across_weight_offset;
Dtype coeff = 1.0;
const Dtype* a = top_diff + top_offset;
const Dtype* b = offset_col_buffer + buffer_offset;
c_across[output_c*channels+c] += a[output_c]*b[c * kernel_h * kernel_w + ((kernel_h -1)/2)*kernel_w + (kernel_w-1)/2];
}
}
}
}
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H * W C'kk , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_within_weight(const int n, const Dtype* offset_col_buffer, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* across_weight_diff, Dtype* within_weight_diff) {
CUDA_KERNEL_LOOP(index, n) {
const int k_w = index%kernel_w;
const int k_h = index/ kernel_w %kernel_h;
const int output_c = index/ kernel_w /kernel_h%num_output;
const int w_ind = index/ kernel_w /kernel_h/num_output % width;
const int h_ind = index/ kernel_w /kernel_h/num_output / width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
for(int s_h = 0; s_h < h_samples;s_h ++){
for(int s_w = 0; s_w < w_samples; s_w++){
int buffer_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)* channels*kernel_h*kernel_w;
int top_offset = (((h_ind* width + w_ind)*h_samples + s_h)*w_samples)*num_output;
//int weight_offset = index*num_output*channels*kernel_h*kernel_w;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
//gpu_matrix_mult<Dtype>( num_output*kernel_dim_, top_diff + top_offset ,offset_col_buffer + buffer_offset , weight_diff + weight_offset, num_output /* C' */, 1,kernel_dim_ /* C * h * w */ ,(Dtype)1.);
Dtype * c_within = within_weight_diff + within_weight_offset;
Dtype coeff = 1.0;
const Dtype* a = top_diff + top_offset;
const Dtype* b = offset_col_buffer + buffer_offset;
for(int ind =0 ;ind<channels;ind++){
c_within[output_c* kernel_h*kernel_w + k_h * kernel_w + k_w] += a[output_c]*b[ind * kernel_h * kernel_w + k_h*kernel_w + k_w];
}
}
}
}
}
//across weight NHWC'C
//within weight NHWC'kk
//col_buf NHWssCkk
//top_dif NHWssC'
template <typename Dtype> // n is H W ssCkk , weight is like W , H, C' C, k, k, (after permuted!!), top_data N, W , H , h_samples_ * w_samples_*C'
__global__ void object_kernel_backward_data(const int n, const Dtype* across_weight,const Dtype* within_weight, const Dtype* top_diff,const int channels, const int num_output, const int height, const int width, const int kernel_h, const int kernel_w, const int h_samples, const int w_samples, Dtype* offset_col_buffer) {
CUDA_KERNEL_LOOP(index, n) {
const int k_w = index%kernel_w;
const int k_h = index/kernel_w%kernel_h;
const int k_c = index/kernel_w/kernel_h%channels;
const int s_w = index/kernel_w/kernel_h/channels%w_samples;
const int s_h = index/kernel_w/kernel_h/channels/w_samples%h_samples;
const int w_ind = index/kernel_w/kernel_h/channels/w_samples/h_samples % width;
const int h_ind = index/kernel_w/kernel_h/channels/w_samples/h_samples/ width % height;
const int kernel_dim_ = channels * kernel_h*kernel_w;
int buffer_offset = index;
int top_offset = (int)(index/kernel_dim_)*num_output;
const int across_weight_offset = (h_ind* width + w_ind) * num_output * channels;
const int within_weight_offset = (h_ind* width + w_ind) * num_output * kernel_h * kernel_w;
const Dtype* a = top_diff + top_offset;
const Dtype* b_across = across_weight + across_weight_offset;
const Dtype* b_within = within_weight + within_weight_offset;
offset_col_buffer[index] =0;
for(int ind =0 ;ind<num_output ;ind++){
const Dtype w_across_val = (k_w == int((kernel_w-1)/2)&&k_h == int((kernel_h-1)/2))?b_across[ind*channels + k_c]:Dtype(0.0);
const Dtype w_within_val = b_within[ind*kernel_h * kernel_w + k_h*kernel_w + k_w];
offset_col_buffer[index] += a[ind]*(w_across_val+w_within_val);
}
}
}
template <typename Dtype>
void DynamicSampleConvolutionLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* bottom_across_weights = bottom[1]->gpu_data();// N,W,H,C'*C
const Dtype* bottom_within_weights = bottom[2]->gpu_data();// N,W,H,C'*s*s
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
Dtype* bottom_across_weight_diff = bottom[1]->mutable_gpu_diff();
Dtype* bottom_within_weight_diff = bottom[2]->mutable_gpu_diff();
caffe_gpu_set(bottom[0]->count(), Dtype(0.0), bottom_diff);
caffe_gpu_set(bottom[1]->count(), Dtype(0.0), bottom_across_weight_diff );
caffe_gpu_set(bottom[2]->count(), Dtype(0.0), bottom_within_weight_diff);
  //printf("begin Backward\n");
for (int n = 0; n < this->num_; ++n) {
// gradient w.r.t. weight. Note that we will accumulate diffs.
//const Dtype* weights = bottom_weights + n * bottom[1]->count(1);
const Dtype* across_weights = bottom_across_weights + n * bottom[1]->count(1);
const Dtype* within_weights = bottom_within_weights + n * bottom[2]->count(1);
const Dtype* diff = top_diff + n * top[0]->count(1);
const Dtype* input = bottom_data + n * this->bottom_dim_;
Dtype* across_weights_diff = bottom_across_weight_diff + n * bottom[1]->count(1);
Dtype* within_weights_diff = bottom_within_weight_diff + n * bottom[2]->count(1);
//printf("begin im2col\n");
object_kernel_conv_im2col(input, channels_,height_, width_, kernel_size_, kernel_size_, dilation_, dilation_, samples_, samples_, kernel_stride_,offset_col_buffer_.mutable_gpu_data());
//printf("im2col done\n");
const Dtype* offset_col_buff = offset_col_buffer_.gpu_data();
// printf("begin back to across\n");
object_kernel_backward_across_weight<<<CAFFE_GET_BLOCKS(height_ * width_ ), CAFFE_CUDA_NUM_THREADS>>>(height_*width_ , offset_col_buff, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, across_weights_diff,within_weights_diff);
// printf("back to across done, begin back to within\n");
object_kernel_backward_within_weight<<<CAFFE_GET_BLOCKS(height_ * width_ ), CAFFE_CUDA_NUM_THREADS>>>(height_*width_ , offset_col_buff, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, across_weights_diff,within_weights_diff);
// printf("back to within done\n");
// gradient w.r.t. bottom data, if necessary.
Dtype * att_col_diff_buff = offset_col_buffer_.mutable_gpu_data();
// printf("begin back to col\n");
object_kernel_backward_data<<<CAFFE_GET_BLOCKS(height_ * width_ ), CAFFE_CUDA_NUM_THREADS>>>(height_*width_ ,across_weights,within_weights, diff, channels_, num_output_, height_, width_, kernel_size_, kernel_size_, samples_, samples_, att_col_diff_buff);
// printf("back to col done, begin col2im\n");
object_kernel_conv_col2im(offset_col_buffer_.gpu_data(), channels_, height_, width_, kernel_size_, kernel_size_, samples_,samples_, dilation_, dilation_,kernel_stride_, bottom_diff + n*bottom[0]->count(1));
// printf(" col2im done\n");
}
caffe_gpu_scal(bottom[0]->count(),Dtype(lr_mul),bottom_diff);
//caffe_gpu_scal(bottom[1]->count(),Dtype(lr_mul),bottom_weight_diff);
}
INSTANTIATE_LAYER_GPU_FUNCS(DynamicSampleConvolutionLayer);
} // namespace caffe
|
06e2a5da44223b43303bd7593ccb04c06e149863.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> d, Mon Jun 25 18:24:16 2018
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "dtrtri.cuh"
/***************************************************************************//**
Purpose
-------
DTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_dtrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
double const * const *dA_array, magma_int_t ldda,
double **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_dlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used here instead, the whole vectors of the initial size would
    // have to be cleared, which would require passing dinvA_length as an input parameter;
    // this was tested and turned out to be slower.
    // If n is not the largest size computed by the high-level getrf_batched API, skipping
    // the reset is a bug and magmablas_dlaset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( dtrtri_diag_lower_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
hipLaunchKernelGGL(( dtrtri_diag_upper_kernel_batched)
, dim3(diaggrid), dim3(IB), 0, queue->cuda_stream() ,
diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
hipLaunchKernelGGL(( triple_dgemm16_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm16_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
hipLaunchKernelGGL(( triple_dgemm32_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm32_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
hipLaunchKernelGGL(( triple_dgemm64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
hipLaunchKernelGGL(( triple_dgemm_above64_part1_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part2_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
hipLaunchKernelGGL(( triple_dgemm_above64_part3_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue->cuda_stream() , n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
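// Illustrative usage sketch, assuming dA_array and dinvA_array already point to
// batchCount device matrices and `queue` was created with magma_queue_create: a typical
// call that inverts the diagonal blocks of lower-triangular, non-unit matrices is
//
//   magmablas_dtrtri_diag_batched( MagmaLower, MagmaNonUnit, n,
//                                  dA_array, ldda, dinvA_array,
//                                  /*resetozero=*/1, batchCount, queue );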
|
06e2a5da44223b43303bd7593ccb04c06e149863.cu
|
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@generated from magmablas/ztrtri_diag_batched.cu, normal z -> d, Mon Jun 25 18:24:16 2018
@author Peng Du
@author Tingxing Dong
@author Mark Gates
@author Azzam Haidar
File named dtrtri_diag.cu to avoid name conflict with src/dtrtri.o
in the library. The actual kernels are in dtrtri_lower.cu and dtrtri_upper.cu
*/
#include "magma_internal.h"
#define TRTRI_BATCHED
#include "dtrtri.cuh"
/***************************************************************************//**
Purpose
-------
DTRTRI_DIAG inverts the NB x NB diagonal blocks of a triangular matrix.
This routine is used in dtrsm.
Arguments
----------
@param[in]
uplo magma_uplo_t.
On entry, uplo specifies whether the matrix A is an upper or
lower triangular matrix as follows:
- = MagmaUpper: A is an upper triangular matrix.
- = MagmaLower: A is a lower triangular matrix.
@param[in]
diag magma_diag_t.
On entry, diag specifies whether or not A is unit triangular
as follows:
- = MagmaUnit: A is assumed to be unit triangular.
- = MagmaNonUnit: A is not assumed to be unit triangular.
@param[in]
n INTEGER.
On entry, n specifies the order of the matrix A. N >= 0.
@param[in]
dA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array A of dimension ( ldda, n )
The triangular matrix A.
\n
If UPLO = MagmaUpper, the leading N-by-N upper triangular part of A
contains the upper triangular matrix, and the strictly lower
triangular part of A is not referenced.
\n
If UPLO = MagmaLower, the leading N-by-N lower triangular part of A
contains the lower triangular matrix, and the strictly upper
triangular part of A is not referenced.
\n
If DIAG = MagmaUnit, the diagonal elements of A are also not referenced
and are assumed to be 1.
@param[in]
ldda INTEGER.
The leading dimension of each array A. LDDA >= max(1,N).
@param[out]
dinvA_array Array of pointers, dimension (batchCount).
Each is a DOUBLE PRECISION array dinvA of dimension (NB, ceil(n/NB)*NB),
where NB = 128.
On exit, contains inverses of the NB-by-NB diagonal blocks of A.
@param[in]
resetozero INTEGER
If not zero, each array dinvA will be reset to all zeros
@param[in]
batchCount INTEGER
The number of matrices to operate on.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_trtri_diag_batched
*******************************************************************************/
extern "C" void
magmablas_dtrtri_diag_batched(
magma_uplo_t uplo, magma_diag_t diag, magma_int_t n,
double const * const *dA_array, magma_int_t ldda,
double **dinvA_array,
magma_int_t resetozero, magma_int_t batchCount, magma_queue_t queue)
{
magma_int_t info = 0;
if (uplo != MagmaLower && uplo != MagmaUpper)
info = -1;
else if (diag != MagmaNonUnit && diag != MagmaUnit)
info = -2;
else if (n < 0)
info = -3;
else if (ldda < n)
info = -5;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info
}
int nblocks = magma_ceildiv( n, IB );
if ( resetozero ) {
magmablas_dlaset_batched(MagmaFull, magma_roundup( n, NB ), NB, MAGMA_D_ZERO, MAGMA_D_ZERO, dinvA_array, magma_roundup( n, NB ), batchCount, queue);
}
    // If cudaMemset were used here instead, the whole vectors of the initial size would
    // have to be cleared, which would require passing dinvA_length as an input parameter;
    // this was tested and turned out to be slower.
    // If n is not the largest size computed by the high-level getrf_batched API, skipping
    // the reset is a bug and magmablas_dlaset_batched must be used.
if ( uplo == MagmaLower ) {
// invert diagonal IB x IB inner blocks
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
dtrtri_diag_lower_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// build up NB x NB blocks (assuming IB=16 here):
// use 16 x 16 blocks to build 32 x 32 blocks, 1 x (1 x npages) grid, 4 x 4 threads;
// then 32 x 32 blocks to build 64 x 64 blocks, 1 x (2 x npages) grid, 8 x 4 threads;
// then 64 x 64 blocks to build 128 x 128 blocks, 1 x (4 x npages) grid, 16 x 4 threads;
// then 128 x 128 blocks to build 256 x 256 blocks, 2 x (8 x npages) grid, 16 x 4 threads.
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
//printf( "n %d, jb %d, grid %d x %d (%d x %d)\n", n, jb, grid.x, grid.y, grid.y / npages, npages );
switch (jb) {
case 16:
triple_dgemm16_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm16_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_dgemm32_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm32_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_dgemm64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_dgemm_above64_part1_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part2_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part3_lower_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
else {
dim3 diaggrid( nblocks, 1, batchCount ); // emulate 3D grid
dtrtri_diag_upper_kernel_batched
<<< diaggrid, IB, 0, queue->cuda_stream() >>>
( diag, n, dA_array, ldda, dinvA_array );
// update the inverse up to the size of IB
for( int jb=IB; jb < NB; jb *= 2 ) {
int kb = jb*2;
int npages = magma_ceildiv( n, kb );
dim3 threads( (jb <= 32 ? jb/4 : 16), 4 );
dim3 grid( jb/(threads.x*threads.y), npages*(jb/16), batchCount ); // emulate 3D grid: NX * (NY*npages), for CUDA ARCH 1.x
switch (jb) {
case 16:
triple_dgemm16_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm16_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 32:
triple_dgemm32_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm32_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
case 64:
triple_dgemm64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
default:
triple_dgemm_above64_part1_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part2_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
triple_dgemm_above64_part3_upper_kernel_batched<<< grid, threads, 0, queue->cuda_stream() >>>( n, dA_array, ldda, dinvA_array, jb, npages );
break;
}
if ( kb >= n ) break;
}
}
}
|
50fa292540e4374ea6d23e6cf2a7dbcad3471110.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernelSmoothY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float const *in = NULL;
hipMalloc(&in, XSIZE*YSIZE*sizeof(float));  // size is in bytes, not element count
int w = XSIZE;
int h = YSIZE;
float *out = NULL;
hipMalloc(&out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
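// The two loops above round iXSIZE/iYSIZE up to the next multiple of BLOCKX/BLOCKY
// (equivalent to iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX), so the grid below
// covers the whole matrix.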
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(kernelSmoothY, dim3(gridBlock), dim3(threadBlock), 0, 0, in, w, h, out);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(kernelSmoothY, dim3(gridBlock), dim3(threadBlock), 0, 0, in, w, h, out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(kernelSmoothY, dim3(gridBlock), dim3(threadBlock), 0, 0, in, w, h, out);
}
hipDeviceSynchronize(); // wait for all timed launches to finish before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
50fa292540e4374ea6d23e6cf2a7dbcad3471110.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernelSmoothY.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float const *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE*sizeof(float));  // size is in bytes, not element count
int w = XSIZE;
int h = YSIZE;
float *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernelSmoothY<<<gridBlock,threadBlock>>>(in,w,h,out);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernelSmoothY<<<gridBlock,threadBlock>>>(in,w,h,out);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernelSmoothY<<<gridBlock,threadBlock>>>(in,w,h,out);
}
cudaDeviceSynchronize(); // wait for all timed launches to finish before stopping the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|